2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #include <linux/cache.h>
12 #include <linux/capability.h>
13 #include <linux/skbuff.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/netdevice.h>
17 #include <linux/module.h>
18 #include <linux/icmp.h>
20 #include <net/compat.h>
21 #include <asm/uaccess.h>
22 #include <linux/mutex.h>
23 #include <linux/proc_fs.h>
24 #include <linux/err.h>
25 #include <linux/cpumask.h>
27 #include <linux/netfilter/x_tables.h>
28 #include <linux/netfilter_ipv4/ip_tables.h>
30 MODULE_LICENSE("GPL");
31 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
32 MODULE_DESCRIPTION("IPv4 packet filter");
34 /*#define DEBUG_IP_FIREWALL*/
35 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
36 /*#define DEBUG_IP_FIREWALL_USER*/
38 #ifdef DEBUG_IP_FIREWALL
39 #define dprintf(format, args...) printk(format , ## args)
41 #define dprintf(format, args...)
44 #ifdef DEBUG_IP_FIREWALL_USER
45 #define duprintf(format, args...) printk(format , ## args)
47 #define duprintf(format, args...)
50 #ifdef CONFIG_NETFILTER_DEBUG
51 #define IP_NF_ASSERT(x) \
54 printk("IP_NF_ASSERT: %s:%s:%u\n", \
55 __FUNCTION__, __FILE__, __LINE__); \
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
68 We keep a set of rules for each CPU, so we can avoid write-locking
69 them in the softirq when updating the counters and therefore
70 only need to read-lock in the softirq; doing a write_lock_bh() in user
71 context stops packets coming through and allows user context to read
72 the counters or update the rules.
74 Hence the start of any table is given by get_table() below. */
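/* Concretely: xt_table_info keeps one copy of the entry blob per CPU
 * (private->entries[cpu]). Packet processing below only ever takes
 * read_lock_bh() and walks its own CPU's copy, while counter reads and
 * rule updates from user context take write_lock_bh(), which briefly
 * holds off packet processing. */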
76 /* Returns whether the packet matches the rule or not. */
78 ip_packet_match(const struct iphdr *ip,
81 const struct ipt_ip *ipinfo,
87 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
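/* FWINV simply XORs the raw comparison result with the rule's per-field
 * invert flag: e.g. with IPT_INV_SRCIP set, a source address that fails
 * the masked comparison is exactly what counts as a match. */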
89 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
91 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
93 dprintf("Source or dest mismatch.\n");
95 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
97 NIPQUAD(ipinfo->smsk.s_addr),
98 NIPQUAD(ipinfo->src.s_addr),
99 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
100 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
102 NIPQUAD(ipinfo->dmsk.s_addr),
103 NIPQUAD(ipinfo->dst.s_addr),
104 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
108 /* Look for ifname matches; this should unroll nicely. */
109 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
110 ret |= (((const unsigned long *)indev)[i]
111 ^ ((const unsigned long *)ipinfo->iniface)[i])
112 & ((const unsigned long *)ipinfo->iniface_mask)[i];
115 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
116 dprintf("VIA in mismatch (%s vs %s).%s\n",
117 indev, ipinfo->iniface,
118 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
122 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
123 ret |= (((const unsigned long *)outdev)[i]
124 ^ ((const unsigned long *)ipinfo->outiface)[i])
125 & ((const unsigned long *)ipinfo->outiface_mask)[i];
128 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
129 dprintf("VIA out mismatch (%s vs %s).%s\n",
130 outdev, ipinfo->outiface,
131 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
135 /* Check specific protocol */
137 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
138 dprintf("Packet protocol %hi does not match %hi.%s\n",
139 ip->protocol, ipinfo->proto,
140 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
144 /* If we have a fragment rule but the packet is not a fragment
145 * then we return zero */
146 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
147 dprintf("Fragment rule but not fragment.%s\n",
148 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
156 ip_checkentry(const struct ipt_ip *ip)
158 if (ip->flags & ~IPT_F_MASK) {
159 duprintf("Unknown flag bits set: %08X\n",
160 ip->flags & ~IPT_F_MASK);
163 if (ip->invflags & ~IPT_INV_MASK) {
164 duprintf("Unknown invflag bits set: %08X\n",
165 ip->invflags & ~IPT_INV_MASK);
172 ipt_error(struct sk_buff *skb,
173 const struct net_device *in,
174 const struct net_device *out,
175 unsigned int hooknum,
176 const struct xt_target *target,
177 const void *targinfo)
180 printk("ip_tables: error: `%s'\n", (char *)targinfo);
186 bool do_match(struct ipt_entry_match *m,
187 const struct sk_buff *skb,
188 const struct net_device *in,
189 const struct net_device *out,
193 /* Stop iteration if it doesn't match */
194 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
195 offset, ip_hdrlen(skb), hotdrop))
201 static inline struct ipt_entry *
202 get_entry(void *base, unsigned int offset)
204 return (struct ipt_entry *)(base + offset);
207 /* All zeroes == unconditional rule. */
209 unconditional(const struct ipt_ip *ip)
213 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
214 if (((__u32 *)ip)[i])
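/* i.e. an entry counts as unconditional only if every 32-bit word of its
 * ipt_ip part (addresses, masks, interface names, proto, flags, invflags)
 * is zero. */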
220 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
221 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
222 static const char *hooknames[] = {
223 [NF_INET_PRE_ROUTING] = "PREROUTING",
224 [NF_INET_LOCAL_IN] = "INPUT",
225 [NF_INET_FORWARD] = "FORWARD",
226 [NF_INET_LOCAL_OUT] = "OUTPUT",
227 [NF_INET_POST_ROUTING] = "POSTROUTING",
230 enum nf_ip_trace_comments {
231 NF_IP_TRACE_COMMENT_RULE,
232 NF_IP_TRACE_COMMENT_RETURN,
233 NF_IP_TRACE_COMMENT_POLICY,
236 static const char *comments[] = {
237 [NF_IP_TRACE_COMMENT_RULE] = "rule",
238 [NF_IP_TRACE_COMMENT_RETURN] = "return",
239 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
242 static struct nf_loginfo trace_loginfo = {
243 .type = NF_LOG_TYPE_LOG,
247 .logflags = NF_LOG_MASK,
253 get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
254 char *hookname, char **chainname,
255 char **comment, unsigned int *rulenum)
257 struct ipt_standard_target *t = (void *)ipt_get_target(s);
259 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
260 /* Head of user chain: ERROR target with chainname */
261 *chainname = t->target.data;
266 if (s->target_offset == sizeof(struct ipt_entry)
267 && strcmp(t->target.u.kernel.target->name,
268 IPT_STANDARD_TARGET) == 0
270 && unconditional(&s->ip)) {
271 /* Tail of chains: STANDARD target (return/policy) */
272 *comment = *chainname == hookname
273 ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
274 : (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
283 static void trace_packet(struct sk_buff *skb,
285 const struct net_device *in,
286 const struct net_device *out,
288 struct xt_table_info *private,
292 struct ipt_entry *root;
293 char *hookname, *chainname, *comment;
294 unsigned int rulenum = 0;
296 table_base = (void *)private->entries[smp_processor_id()];
297 root = get_entry(table_base, private->hook_entry[hook]);
299 hookname = chainname = (char *)hooknames[hook];
300 comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];
302 IPT_ENTRY_ITERATE(root,
303 private->size - private->hook_entry[hook],
304 get_chainname_rulenum,
305 e, hookname, &chainname, &comment, &rulenum);
307 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
308 "TRACE: %s:%s:%s:%u ",
309 tablename, chainname, comment, rulenum);
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
315 ipt_do_table(struct sk_buff *skb,
317 const struct net_device *in,
318 const struct net_device *out,
319 struct xt_table *table)
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
325 bool hotdrop = false;
326 /* Initializing verdict to NF_DROP keeps gcc happy. */
327 unsigned int verdict = NF_DROP;
328 const char *indev, *outdev;
330 struct ipt_entry *e, *back;
331 struct xt_table_info *private;
335 datalen = skb->len - ip->ihl * 4;
336 indev = in ? in->name : nulldevname;
337 outdev = out ? out->name : nulldevname;
338 /* We handle fragments by dealing with the first fragment as
339 * if it were a normal packet. All other fragments are treated
340 * normally, except that they will NEVER match rules that ask for
341 * things we don't know, i.e. the tcp syn flag or ports. If the
342 * rule is also a fragment-specific rule, non-fragments won't match it. */
344 offset = ntohs(ip->frag_off) & IP_OFFSET;
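/* offset is non-zero only for second and later fragments; those carry no
 * transport header, which is why the matching described above can never
 * succeed for rules that need port or flag information. */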
346 read_lock_bh(&table->lock);
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
348 private = table->private;
349 table_base = (void *)private->entries[smp_processor_id()];
350 e = get_entry(table_base, private->hook_entry[hook]);
352 /* For return from builtin chain */
353 back = get_entry(table_base, private->underflow[hook]);
358 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
359 struct ipt_entry_target *t;
361 if (IPT_MATCH_ITERATE(e, do_match,
363 offset, &hotdrop) != 0)
366 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
368 t = ipt_get_target(e);
369 IP_NF_ASSERT(t->u.kernel.target);
371 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
372 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
373 /* The packet is traced: log it */
374 if (unlikely(skb->nf_trace))
375 trace_packet(skb, hook, in, out,
376 table->name, private, e);
378 /* Standard target? */
379 if (!t->u.kernel.target->target) {
382 v = ((struct ipt_standard_target *)t)->verdict;
384 /* Pop from stack? */
385 if (v != IPT_RETURN) {
386 verdict = (unsigned)(-v) - 1;
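/* Standard verdicts are stored as -(verdict) - 1, so a stored value of
 * -2 decodes to NF_ACCEPT (1) here and -1 decodes to NF_DROP (0);
 * positive values are jump offsets and are handled further down. */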
390 back = get_entry(table_base,
394 if (table_base + v != (void *)e + e->next_offset
395 && !(e->ip.flags & IPT_F_GOTO)) {
396 /* Save old back ptr in next entry */
397 struct ipt_entry *next
398 = (void *)e + e->next_offset;
400 = (void *)back - table_base;
401 /* set back pointer to next entry */
405 e = get_entry(table_base, v);
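/* A normal jump makes the fall-through entry the new return point and
 * stashes the old back offset in that entry's comefrom field as scratch
 * space; an IPT_F_GOTO rule skips that bookkeeping and just continues in
 * the target chain. */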
407 /* Targets which reenter must return
409 #ifdef CONFIG_NETFILTER_DEBUG
410 ((struct ipt_entry *)table_base)->comefrom
413 verdict = t->u.kernel.target->target(skb,
419 #ifdef CONFIG_NETFILTER_DEBUG
420 if (((struct ipt_entry *)table_base)->comefrom
422 && verdict == IPT_CONTINUE) {
423 printk("Target %s reentered!\n",
424 t->u.kernel.target->name);
427 ((struct ipt_entry *)table_base)->comefrom
430 /* Target might have changed stuff. */
432 datalen = skb->len - ip->ihl * 4;
434 if (verdict == IPT_CONTINUE)
435 e = (void *)e + e->next_offset;
443 e = (void *)e + e->next_offset;
447 read_unlock_bh(&table->lock);
449 #ifdef DEBUG_ALLOW_ALL
458 /* Figures out from what hook each rule can be called: returns 0 if
459 there are loops. Puts hook bitmask in comefrom. */
461 mark_source_chains(struct xt_table_info *newinfo,
462 unsigned int valid_hooks, void *entry0)
466 /* No recursion; use packet counter to save back ptrs (reset
467 to 0 as we leave), and comefrom to save source hook bitmask */
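/* The walk below is an iterative depth-first traversal of every chain
 * reachable from each hook: pcnt temporarily stores the position we came
 * from, and the extra (1 << NF_INET_NUMHOOKS) bit in comefrom marks
 * entries on the current path, so revisiting one means a rule loop. */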
468 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
469 unsigned int pos = newinfo->hook_entry[hook];
471 = (struct ipt_entry *)(entry0 + pos);
473 if (!(valid_hooks & (1 << hook)))
476 /* Set initial back pointer. */
477 e->counters.pcnt = pos;
480 struct ipt_standard_target *t
481 = (void *)ipt_get_target(e);
482 int visited = e->comefrom & (1 << hook);
484 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
485 printk("iptables: loop hook %u pos %u %08X.\n",
486 hook, pos, e->comefrom);
490 |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
492 /* Unconditional return/END. */
493 if ((e->target_offset == sizeof(struct ipt_entry)
494 && (strcmp(t->target.u.user.name,
495 IPT_STANDARD_TARGET) == 0)
497 && unconditional(&e->ip)) || visited) {
498 unsigned int oldpos, size;
500 if (t->verdict < -NF_MAX_VERDICT - 1) {
501 duprintf("mark_source_chains: bad "
502 "negative verdict (%i)\n",
507 /* Return: backtrack through the last
510 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
511 #ifdef DEBUG_IP_FIREWALL_USER
513 & (1 << NF_INET_NUMHOOKS)) {
514 duprintf("Back unset "
521 pos = e->counters.pcnt;
522 e->counters.pcnt = 0;
524 /* We're at the start. */
528 e = (struct ipt_entry *)
530 } while (oldpos == pos + e->next_offset);
533 size = e->next_offset;
534 e = (struct ipt_entry *)
535 (entry0 + pos + size);
536 e->counters.pcnt = pos;
539 int newpos = t->verdict;
541 if (strcmp(t->target.u.user.name,
542 IPT_STANDARD_TARGET) == 0
544 if (newpos > newinfo->size -
545 sizeof(struct ipt_entry)) {
546 duprintf("mark_source_chains: "
547 "bad verdict (%i)\n",
551 /* This is a jump; chase it. */
552 duprintf("Jump rule %u -> %u\n",
555 /* ... this is a fallthru */
556 newpos = pos + e->next_offset;
558 e = (struct ipt_entry *)
560 e->counters.pcnt = pos;
565 duprintf("Finished chain %u\n", hook);
571 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
573 if (i && (*i)-- == 0)
576 if (m->u.kernel.match->destroy)
577 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
578 module_put(m->u.kernel.match->me);
583 check_entry(struct ipt_entry *e, const char *name)
585 struct ipt_entry_target *t;
587 if (!ip_checkentry(&e->ip)) {
588 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
592 if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
595 t = ipt_get_target(e);
596 if (e->target_offset + t->u.target_size > e->next_offset)
602 static inline int check_match(struct ipt_entry_match *m, const char *name,
603 const struct ipt_ip *ip,
604 unsigned int hookmask, unsigned int *i)
606 struct xt_match *match;
609 match = m->u.kernel.match;
610 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
611 name, hookmask, ip->proto,
612 ip->invflags & IPT_INV_PROTO);
613 if (!ret && m->u.kernel.match->checkentry
614 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
616 duprintf("ip_tables: check failed for `%s'.\n",
617 m->u.kernel.match->name);
626 find_check_match(struct ipt_entry_match *m,
628 const struct ipt_ip *ip,
629 unsigned int hookmask,
632 struct xt_match *match;
635 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
637 "ipt_%s", m->u.user.name);
638 if (IS_ERR(match) || !match) {
639 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
640 return match ? PTR_ERR(match) : -ENOENT;
642 m->u.kernel.match = match;
644 ret = check_match(m, name, ip, hookmask, i);
650 module_put(m->u.kernel.match->me);
654 static inline int check_target(struct ipt_entry *e, const char *name)
656 struct ipt_entry_target *t;
657 struct xt_target *target;
660 t = ipt_get_target(e);
661 target = t->u.kernel.target;
662 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
663 name, e->comefrom, e->ip.proto,
664 e->ip.invflags & IPT_INV_PROTO);
665 if (!ret && t->u.kernel.target->checkentry
666 && !t->u.kernel.target->checkentry(name, e, target, t->data,
668 duprintf("ip_tables: check failed for `%s'.\n",
669 t->u.kernel.target->name);
676 find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
679 struct ipt_entry_target *t;
680 struct xt_target *target;
684 ret = check_entry(e, name);
689 ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
692 goto cleanup_matches;
694 t = ipt_get_target(e);
695 target = try_then_request_module(xt_find_target(AF_INET,
698 "ipt_%s", t->u.user.name);
699 if (IS_ERR(target) || !target) {
700 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
701 ret = target ? PTR_ERR(target) : -ENOENT;
702 goto cleanup_matches;
704 t->u.kernel.target = target;
706 ret = check_target(e, name);
713 module_put(t->u.kernel.target->me);
715 IPT_MATCH_ITERATE(e, cleanup_match, &j);
720 check_entry_size_and_hooks(struct ipt_entry *e,
721 struct xt_table_info *newinfo,
723 unsigned char *limit,
724 const unsigned int *hook_entries,
725 const unsigned int *underflows,
730 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
731 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
732 duprintf("Bad offset %p\n", e);
737 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
738 duprintf("checking: element %p size %u\n",
743 /* Check hooks & underflows */
744 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
745 if ((unsigned char *)e - base == hook_entries[h])
746 newinfo->hook_entry[h] = hook_entries[h];
747 if ((unsigned char *)e - base == underflows[h])
748 newinfo->underflow[h] = underflows[h];
751 /* FIXME: underflows must be unconditional, standard verdicts
752 < 0 (not IPT_RETURN). --RR */
754 /* Clear counters and comefrom */
755 e->counters = ((struct xt_counters) { 0, 0 });
763 cleanup_entry(struct ipt_entry *e, unsigned int *i)
765 struct ipt_entry_target *t;
767 if (i && (*i)-- == 0)
770 /* Cleanup all matches */
771 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
772 t = ipt_get_target(e);
773 if (t->u.kernel.target->destroy)
774 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
775 module_put(t->u.kernel.target->me);
779 /* Checks and translates the user-supplied table segment (held in
782 translate_table(const char *name,
783 unsigned int valid_hooks,
784 struct xt_table_info *newinfo,
788 const unsigned int *hook_entries,
789 const unsigned int *underflows)
794 newinfo->size = size;
795 newinfo->number = number;
797 /* Init all hooks to impossible value. */
798 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
799 newinfo->hook_entry[i] = 0xFFFFFFFF;
800 newinfo->underflow[i] = 0xFFFFFFFF;
803 duprintf("translate_table: size %u\n", newinfo->size);
805 /* Walk through entries, checking offsets. */
806 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
807 check_entry_size_and_hooks,
811 hook_entries, underflows, &i);
816 duprintf("translate_table: %u not %u entries\n",
821 /* Check hooks all assigned */
822 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
823 /* Only hooks which are valid */
824 if (!(valid_hooks & (1 << i)))
826 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
827 duprintf("Invalid hook entry %u %u\n",
831 if (newinfo->underflow[i] == 0xFFFFFFFF) {
832 duprintf("Invalid underflow %u %u\n",
838 if (!mark_source_chains(newinfo, valid_hooks, entry0))
841 /* Finally, each sanity check must pass */
843 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
844 find_check_entry, name, size, &i);
847 IPT_ENTRY_ITERATE(entry0, newinfo->size,
852 /* And one copy for every other CPU */
853 for_each_possible_cpu(i) {
854 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
855 memcpy(newinfo->entries[i], entry0, newinfo->size);
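/* entry0 is the copy that has just been checked and fixed up in place;
 * every other CPU's copy starts out as a byte-for-byte duplicate of it. */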
863 add_entry_to_counter(const struct ipt_entry *e,
864 struct xt_counters total[],
867 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
874 set_entry_to_counter(const struct ipt_entry *e,
875 struct ipt_counters total[],
878 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
885 get_counters(const struct xt_table_info *t,
886 struct xt_counters counters[])
892 /* Instead of clearing (by a previous call to memset())
893 * the counters and using adds, we set the counters
894 * with data used by the 'current' CPU.
895 * We don't care about preemption here.
897 curcpu = raw_smp_processor_id();
900 IPT_ENTRY_ITERATE(t->entries[curcpu],
902 set_entry_to_counter,
906 for_each_possible_cpu(cpu) {
910 IPT_ENTRY_ITERATE(t->entries[cpu],
912 add_entry_to_counter,
918 static inline struct xt_counters * alloc_counters(struct xt_table *table)
920 unsigned int countersize;
921 struct xt_counters *counters;
922 struct xt_table_info *private = table->private;
924 /* We need an atomic snapshot of the counters: the rest doesn't change
925 (other than comefrom, which userspace doesn't care about). */
927 countersize = sizeof(struct xt_counters) * private->number;
928 counters = vmalloc_node(countersize, numa_node_id());
930 if (counters == NULL)
931 return ERR_PTR(-ENOMEM);
933 /* First, sum counters... */
934 write_lock_bh(&table->lock);
935 get_counters(private, counters);
936 write_unlock_bh(&table->lock);
942 copy_entries_to_user(unsigned int total_size,
943 struct xt_table *table,
944 void __user *userptr)
946 unsigned int off, num;
948 struct xt_counters *counters;
949 struct xt_table_info *private = table->private;
953 counters = alloc_counters(table);
954 if (IS_ERR(counters))
955 return PTR_ERR(counters);
957 /* choose the copy that is on our node/cpu, ...
958 * This choice is lazy (because current thread is
959 * allowed to migrate to another cpu)
961 loc_cpu_entry = private->entries[raw_smp_processor_id()];
962 /* ... then copy entire thing ... */
963 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
968 /* FIXME: use iterator macros --RR */
969 /* ... then go back and fix counters and names */
970 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
972 struct ipt_entry_match *m;
973 struct ipt_entry_target *t;
975 e = (struct ipt_entry *)(loc_cpu_entry + off);
976 if (copy_to_user(userptr + off
977 + offsetof(struct ipt_entry, counters),
979 sizeof(counters[num])) != 0) {
984 for (i = sizeof(struct ipt_entry);
985 i < e->target_offset;
986 i += m->u.match_size) {
989 if (copy_to_user(userptr + off + i
990 + offsetof(struct ipt_entry_match,
992 m->u.kernel.match->name,
993 strlen(m->u.kernel.match->name)+1)
1000 t = ipt_get_target(e);
1001 if (copy_to_user(userptr + off + e->target_offset
1002 + offsetof(struct ipt_entry_target,
1004 t->u.kernel.target->name,
1005 strlen(t->u.kernel.target->name)+1) != 0) {
1016 #ifdef CONFIG_COMPAT
1017 static void compat_standard_from_user(void *dst, void *src)
1019 int v = *(compat_int_t *)src;
1022 v += xt_compat_calc_jump(AF_INET, v);
1023 memcpy(dst, &v, sizeof(v));
1026 static int compat_standard_to_user(void __user *dst, void *src)
1028 compat_int_t cv = *(int *)src;
1031 cv -= xt_compat_calc_jump(AF_INET, cv);
1032 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
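/* Positive standard verdicts are byte offsets into the entry blob; since
 * compat (32-bit) and native entries have different sizes, the offset must
 * be shifted by the accumulated size delta when translating in either
 * direction (added going kernel-ward, subtracted going user-ward). */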
1036 compat_calc_match(struct ipt_entry_match *m, int *size)
1038 *size += xt_compat_match_offset(m->u.kernel.match);
1042 static int compat_calc_entry(struct ipt_entry *e,
1043 const struct xt_table_info *info,
1044 void *base, struct xt_table_info *newinfo)
1046 struct ipt_entry_target *t;
1047 unsigned int entry_offset;
1050 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1051 entry_offset = (void *)e - base;
1052 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1053 t = ipt_get_target(e);
1054 off += xt_compat_target_offset(t->u.kernel.target);
1055 newinfo->size -= off;
1056 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1060 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1061 if (info->hook_entry[i] &&
1062 (e < (struct ipt_entry *)(base + info->hook_entry[i])))
1063 newinfo->hook_entry[i] -= off;
1064 if (info->underflow[i] &&
1065 (e < (struct ipt_entry *)(base + info->underflow[i])))
1066 newinfo->underflow[i] -= off;
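/* Each entry is "off" bytes smaller in its compat representation, so the
 * projected table size shrinks by that much and any hook entry or underflow
 * offset that points past this entry has to shrink with it. */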
1071 static int compat_table_info(const struct xt_table_info *info,
1072 struct xt_table_info *newinfo)
1074 void *loc_cpu_entry;
1076 if (!newinfo || !info)
1079 /* we don't care about newinfo->entries[] */
1080 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1081 newinfo->initial_entries = 0;
1082 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1083 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1084 compat_calc_entry, info, loc_cpu_entry,
1089 static int get_info(void __user *user, int *len, int compat)
1091 char name[IPT_TABLE_MAXNAMELEN];
1095 if (*len != sizeof(struct ipt_getinfo)) {
1096 duprintf("length %u != %u\n", *len,
1097 (unsigned int)sizeof(struct ipt_getinfo));
1101 if (copy_from_user(name, user, sizeof(name)) != 0)
1104 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1105 #ifdef CONFIG_COMPAT
1107 xt_compat_lock(AF_INET);
1109 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1110 "iptable_%s", name);
1111 if (t && !IS_ERR(t)) {
1112 struct ipt_getinfo info;
1113 struct xt_table_info *private = t->private;
1115 #ifdef CONFIG_COMPAT
1117 struct xt_table_info tmp;
1118 ret = compat_table_info(private, &tmp);
1119 xt_compat_flush_offsets(AF_INET);
1123 info.valid_hooks = t->valid_hooks;
1124 memcpy(info.hook_entry, private->hook_entry,
1125 sizeof(info.hook_entry));
1126 memcpy(info.underflow, private->underflow,
1127 sizeof(info.underflow));
1128 info.num_entries = private->number;
1129 info.size = private->size;
1130 strcpy(info.name, name);
1132 if (copy_to_user(user, &info, *len) != 0)
1140 ret = t ? PTR_ERR(t) : -ENOENT;
1141 #ifdef CONFIG_COMPAT
1143 xt_compat_unlock(AF_INET);
1149 get_entries(struct ipt_get_entries __user *uptr, int *len)
1152 struct ipt_get_entries get;
1155 if (*len < sizeof(get)) {
1156 duprintf("get_entries: %u < %d\n", *len,
1157 (unsigned int)sizeof(get));
1160 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1162 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1163 duprintf("get_entries: %u != %u\n", *len,
1164 (unsigned int)(sizeof(struct ipt_get_entries) +
1169 t = xt_find_table_lock(AF_INET, get.name);
1170 if (t && !IS_ERR(t)) {
1171 struct xt_table_info *private = t->private;
1172 duprintf("t->private->number = %u\n",
1174 if (get.size == private->size)
1175 ret = copy_entries_to_user(private->size,
1176 t, uptr->entrytable);
1178 duprintf("get_entries: I've got %u not %u!\n",
1186 ret = t ? PTR_ERR(t) : -ENOENT;
1192 __do_replace(const char *name, unsigned int valid_hooks,
1193 struct xt_table_info *newinfo, unsigned int num_counters,
1194 void __user *counters_ptr)
1198 struct xt_table_info *oldinfo;
1199 struct xt_counters *counters;
1200 void *loc_cpu_old_entry;
1203 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1209 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1210 "iptable_%s", name);
1211 if (!t || IS_ERR(t)) {
1212 ret = t ? PTR_ERR(t) : -ENOENT;
1213 goto free_newinfo_counters_untrans;
1217 if (valid_hooks != t->valid_hooks) {
1218 duprintf("Valid hook crap: %08X vs %08X\n",
1219 valid_hooks, t->valid_hooks);
1224 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1228 /* Update module usage count based on number of rules */
1229 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1230 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1231 if ((oldinfo->number > oldinfo->initial_entries) ||
1232 (newinfo->number <= oldinfo->initial_entries))
1234 if ((oldinfo->number > oldinfo->initial_entries) &&
1235 (newinfo->number <= oldinfo->initial_entries))
1238 /* Get the old counters. */
1239 get_counters(oldinfo, counters);
1240 /* Decrease module usage counts and free resource */
1241 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1242 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1244 xt_free_table_info(oldinfo);
1245 if (copy_to_user(counters_ptr, counters,
1246 sizeof(struct xt_counters) * num_counters) != 0)
1255 free_newinfo_counters_untrans:
1262 do_replace(void __user *user, unsigned int len)
1265 struct ipt_replace tmp;
1266 struct xt_table_info *newinfo;
1267 void *loc_cpu_entry;
1269 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1272 /* Hack: Causes ipchains to give correct error msg --RR */
1273 if (len != sizeof(tmp) + tmp.size)
1274 return -ENOPROTOOPT;
1276 /* overflow check */
1277 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1280 newinfo = xt_alloc_table_info(tmp.size);
1284 /* choose the copy that is our node/cpu */
1285 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1286 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1292 ret = translate_table(tmp.name, tmp.valid_hooks,
1293 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1294 tmp.hook_entry, tmp.underflow);
1298 duprintf("ip_tables: Translated table\n");
1300 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1301 tmp.num_counters, tmp.counters);
1303 goto free_newinfo_untrans;
1306 free_newinfo_untrans:
1307 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1309 xt_free_table_info(newinfo);
1313 /* We're lazy, and add to the first CPU; overflow works its fey magic
1314 * and everything is OK. */
1316 add_counter_to_entry(struct ipt_entry *e,
1317 const struct xt_counters addme[],
1321 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1323 (long unsigned int)e->counters.pcnt,
1324 (long unsigned int)e->counters.bcnt,
1325 (long unsigned int)addme[*i].pcnt,
1326 (long unsigned int)addme[*i].bcnt);
1329 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
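/* The userspace-supplied deltas are just added onto whichever CPU's copy
 * we happen to be running on; get_counters() sums every per-CPU copy, so
 * the totals still come out right. */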
1336 do_add_counters(void __user *user, unsigned int len, int compat)
1339 struct xt_counters_info tmp;
1340 struct xt_counters *paddc;
1341 unsigned int num_counters;
1346 struct xt_table_info *private;
1348 void *loc_cpu_entry;
1349 #ifdef CONFIG_COMPAT
1350 struct compat_xt_counters_info compat_tmp;
1354 size = sizeof(struct compat_xt_counters_info);
1359 size = sizeof(struct xt_counters_info);
1362 if (copy_from_user(ptmp, user, size) != 0)
1365 #ifdef CONFIG_COMPAT
1367 num_counters = compat_tmp.num_counters;
1368 name = compat_tmp.name;
1372 num_counters = tmp.num_counters;
1376 if (len != size + num_counters * sizeof(struct xt_counters))
1379 paddc = vmalloc_node(len - size, numa_node_id());
1383 if (copy_from_user(paddc, user + size, len - size) != 0) {
1388 t = xt_find_table_lock(AF_INET, name);
1389 if (!t || IS_ERR(t)) {
1390 ret = t ? PTR_ERR(t) : -ENOENT;
1394 write_lock_bh(&t->lock);
1395 private = t->private;
1396 if (private->number != num_counters) {
1398 goto unlock_up_free;
1402 /* Choose the copy that is on our node */
1403 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1404 IPT_ENTRY_ITERATE(loc_cpu_entry,
1406 add_counter_to_entry,
1410 write_unlock_bh(&t->lock);
1419 #ifdef CONFIG_COMPAT
1420 struct compat_ipt_replace {
1421 char name[IPT_TABLE_MAXNAMELEN];
1425 u32 hook_entry[NF_INET_NUMHOOKS];
1426 u32 underflow[NF_INET_NUMHOOKS];
1428 compat_uptr_t counters; /* struct ipt_counters * */
1429 struct compat_ipt_entry entries[0];
1433 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1434 compat_uint_t *size, struct xt_counters *counters,
1437 struct ipt_entry_target *t;
1438 struct compat_ipt_entry __user *ce;
1439 u_int16_t target_offset, next_offset;
1440 compat_uint_t origsize;
1445 ce = (struct compat_ipt_entry __user *)*dstptr;
1446 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1449 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1452 *dstptr += sizeof(struct compat_ipt_entry);
1453 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1455 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1456 target_offset = e->target_offset - (origsize - *size);
1459 t = ipt_get_target(e);
1460 ret = xt_compat_target_to_user(t, dstptr, size);
1464 next_offset = e->next_offset - (origsize - *size);
1465 if (put_user(target_offset, &ce->target_offset))
1467 if (put_user(next_offset, &ce->next_offset))
1477 compat_find_calc_match(struct ipt_entry_match *m,
1479 const struct ipt_ip *ip,
1480 unsigned int hookmask,
1483 struct xt_match *match;
1485 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1486 m->u.user.revision),
1487 "ipt_%s", m->u.user.name);
1488 if (IS_ERR(match) || !match) {
1489 duprintf("compat_check_calc_match: `%s' not found\n",
1491 return match ? PTR_ERR(match) : -ENOENT;
1493 m->u.kernel.match = match;
1494 *size += xt_compat_match_offset(match);
1501 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1503 if (i && (*i)-- == 0)
1506 module_put(m->u.kernel.match->me);
1511 compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
1513 struct ipt_entry_target *t;
1515 if (i && (*i)-- == 0)
1518 /* Cleanup all matches */
1519 COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1520 t = compat_ipt_get_target(e);
1521 module_put(t->u.kernel.target->me);
1526 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1527 struct xt_table_info *newinfo,
1529 unsigned char *base,
1530 unsigned char *limit,
1531 unsigned int *hook_entries,
1532 unsigned int *underflows,
1536 struct ipt_entry_target *t;
1537 struct xt_target *target;
1538 unsigned int entry_offset;
1541 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1542 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1543 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1544 duprintf("Bad offset %p, limit = %p\n", e, limit);
1548 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1549 sizeof(struct compat_xt_entry_target)) {
1550 duprintf("checking: element %p size %u\n",
1555 /* For the purposes of check_entry, casting the compat entry is fine */
1556 ret = check_entry((struct ipt_entry *)e, name);
1560 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1561 entry_offset = (void *)e - (void *)base;
1563 ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
1564 &e->ip, e->comefrom, &off, &j);
1566 goto release_matches;
1568 t = compat_ipt_get_target(e);
1569 target = try_then_request_module(xt_find_target(AF_INET,
1571 t->u.user.revision),
1572 "ipt_%s", t->u.user.name);
1573 if (IS_ERR(target) || !target) {
1574 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1576 ret = target ? PTR_ERR(target) : -ENOENT;
1577 goto release_matches;
1579 t->u.kernel.target = target;
1581 off += xt_compat_target_offset(target);
1583 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1587 /* Check hooks & underflows */
1588 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1589 if ((unsigned char *)e - base == hook_entries[h])
1590 newinfo->hook_entry[h] = hook_entries[h];
1591 if ((unsigned char *)e - base == underflows[h])
1592 newinfo->underflow[h] = underflows[h];
1595 /* Clear counters and comefrom */
1596 memset(&e->counters, 0, sizeof(e->counters));
1603 module_put(t->u.kernel.target->me);
1605 IPT_MATCH_ITERATE(e, compat_release_match, &j);
1610 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1611 unsigned int *size, const char *name,
1612 struct xt_table_info *newinfo, unsigned char *base)
1614 struct ipt_entry_target *t;
1615 struct xt_target *target;
1616 struct ipt_entry *de;
1617 unsigned int origsize;
1622 de = (struct ipt_entry *)*dstptr;
1623 memcpy(de, e, sizeof(struct ipt_entry));
1624 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1626 *dstptr += sizeof(struct ipt_entry);
1627 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1629 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
1633 de->target_offset = e->target_offset - (origsize - *size);
1634 t = compat_ipt_get_target(e);
1635 target = t->u.kernel.target;
1636 xt_compat_target_from_user(t, dstptr, size);
1638 de->next_offset = e->next_offset - (origsize - *size);
1639 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1640 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1641 newinfo->hook_entry[h] -= origsize - *size;
1642 if ((unsigned char *)de - base < newinfo->underflow[h])
1643 newinfo->underflow[h] -= origsize - *size;
1648 static inline int compat_check_entry(struct ipt_entry *e, const char *name,
1654 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
1656 goto cleanup_matches;
1658 ret = check_target(e, name);
1660 goto cleanup_matches;
1666 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1671 translate_compat_table(const char *name,
1672 unsigned int valid_hooks,
1673 struct xt_table_info **pinfo,
1675 unsigned int total_size,
1676 unsigned int number,
1677 unsigned int *hook_entries,
1678 unsigned int *underflows)
1681 struct xt_table_info *newinfo, *info;
1682 void *pos, *entry0, *entry1;
1689 info->number = number;
1691 /* Init all hooks to impossible value. */
1692 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1693 info->hook_entry[i] = 0xFFFFFFFF;
1694 info->underflow[i] = 0xFFFFFFFF;
1697 duprintf("translate_compat_table: size %u\n", info->size);
1699 xt_compat_lock(AF_INET);
1700 /* Walk through entries, checking offsets. */
1701 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1702 check_compat_entry_size_and_hooks,
1703 info, &size, entry0,
1704 entry0 + total_size,
1705 hook_entries, underflows, &j, name);
1711 duprintf("translate_compat_table: %u not %u entries\n",
1716 /* Check hooks all assigned */
1717 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1718 /* Only hooks which are valid */
1719 if (!(valid_hooks & (1 << i)))
1721 if (info->hook_entry[i] == 0xFFFFFFFF) {
1722 duprintf("Invalid hook entry %u %u\n",
1723 i, hook_entries[i]);
1726 if (info->underflow[i] == 0xFFFFFFFF) {
1727 duprintf("Invalid underflow %u %u\n",
1734 newinfo = xt_alloc_table_info(size);
1738 newinfo->number = number;
1739 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1740 newinfo->hook_entry[i] = info->hook_entry[i];
1741 newinfo->underflow[i] = info->underflow[i];
1743 entry1 = newinfo->entries[raw_smp_processor_id()];
1746 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1747 compat_copy_entry_from_user, &pos, &size,
1748 name, newinfo, entry1);
1749 xt_compat_flush_offsets(AF_INET);
1750 xt_compat_unlock(AF_INET);
1755 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1759 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1763 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1764 compat_release_entry, &j);
1765 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1766 xt_free_table_info(newinfo);
1770 /* And one copy for every other CPU */
1771 for_each_possible_cpu(i)
1772 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1773 memcpy(newinfo->entries[i], entry1, newinfo->size);
1777 xt_free_table_info(info);
1781 xt_free_table_info(newinfo);
1783 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1786 xt_compat_flush_offsets(AF_INET);
1787 xt_compat_unlock(AF_INET);
1792 compat_do_replace(void __user *user, unsigned int len)
1795 struct compat_ipt_replace tmp;
1796 struct xt_table_info *newinfo;
1797 void *loc_cpu_entry;
1799 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1802 /* Hack: Causes ipchains to give correct error msg --RR */
1803 if (len != sizeof(tmp) + tmp.size)
1804 return -ENOPROTOOPT;
1806 /* overflow check */
1807 if (tmp.size >= INT_MAX / num_possible_cpus())
1809 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1812 newinfo = xt_alloc_table_info(tmp.size);
1816 /* choose the copy that is our node/cpu */
1817 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1818 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1824 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1825 &newinfo, &loc_cpu_entry, tmp.size,
1826 tmp.num_entries, tmp.hook_entry,
1831 duprintf("compat_do_replace: Translated table\n");
1833 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1834 tmp.num_counters, compat_ptr(tmp.counters));
1836 goto free_newinfo_untrans;
1839 free_newinfo_untrans:
1840 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1842 xt_free_table_info(newinfo);
1847 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1852 if (!capable(CAP_NET_ADMIN))
1856 case IPT_SO_SET_REPLACE:
1857 ret = compat_do_replace(user, len);
1860 case IPT_SO_SET_ADD_COUNTERS:
1861 ret = do_add_counters(user, len, 1);
1865 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1872 struct compat_ipt_get_entries {
1873 char name[IPT_TABLE_MAXNAMELEN];
1875 struct compat_ipt_entry entrytable[0];
1879 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1880 void __user *userptr)
1882 struct xt_counters *counters;
1883 struct xt_table_info *private = table->private;
1887 void *loc_cpu_entry;
1890 counters = alloc_counters(table);
1891 if (IS_ERR(counters))
1892 return PTR_ERR(counters);
1894 /* choose the copy that is on our node/cpu, ...
1895 * This choice is lazy (because current thread is
1896 * allowed to migrate to another cpu)
1898 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1901 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1902 compat_copy_entry_to_user,
1903 &pos, &size, counters, &i);
1910 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1913 struct compat_ipt_get_entries get;
1916 if (*len < sizeof(get)) {
1917 duprintf("compat_get_entries: %u < %u\n",
1918 *len, (unsigned int)sizeof(get));
1922 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1925 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1926 duprintf("compat_get_entries: %u != %u\n", *len,
1927 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1932 xt_compat_lock(AF_INET);
1933 t = xt_find_table_lock(AF_INET, get.name);
1934 if (t && !IS_ERR(t)) {
1935 struct xt_table_info *private = t->private;
1936 struct xt_table_info info;
1937 duprintf("t->private->number = %u\n",
1939 ret = compat_table_info(private, &info);
1940 if (!ret && get.size == info.size) {
1941 ret = compat_copy_entries_to_user(private->size,
1942 t, uptr->entrytable);
1944 duprintf("compat_get_entries: I've got %u not %u!\n",
1949 xt_compat_flush_offsets(AF_INET);
1953 ret = t ? PTR_ERR(t) : -ENOENT;
1955 xt_compat_unlock(AF_INET);
1959 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1962 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1966 if (!capable(CAP_NET_ADMIN))
1970 case IPT_SO_GET_INFO:
1971 ret = get_info(user, len, 1);
1973 case IPT_SO_GET_ENTRIES:
1974 ret = compat_get_entries(user, len);
1977 ret = do_ipt_get_ctl(sk, cmd, user, len);
1984 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1988 if (!capable(CAP_NET_ADMIN))
1992 case IPT_SO_SET_REPLACE:
1993 ret = do_replace(user, len);
1996 case IPT_SO_SET_ADD_COUNTERS:
1997 ret = do_add_counters(user, len, 0);
2001 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2009 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2013 if (!capable(CAP_NET_ADMIN))
2017 case IPT_SO_GET_INFO:
2018 ret = get_info(user, len, 0);
2021 case IPT_SO_GET_ENTRIES:
2022 ret = get_entries(user, len);
2025 case IPT_SO_GET_REVISION_MATCH:
2026 case IPT_SO_GET_REVISION_TARGET: {
2027 struct ipt_get_revision rev;
2030 if (*len != sizeof(rev)) {
2034 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2039 if (cmd == IPT_SO_GET_REVISION_TARGET)
2044 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2047 "ipt_%s", rev.name);
2052 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2059 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2062 struct xt_table_info *newinfo;
2063 struct xt_table_info bootstrap
2064 = { 0, 0, 0, { 0 }, { 0 }, { } };
2065 void *loc_cpu_entry;
2067 newinfo = xt_alloc_table_info(repl->size);
2071 /* choose the copy on our node/cpu,
2072 * but don't care about preemption
2074 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2075 memcpy(loc_cpu_entry, repl->entries, repl->size);
2077 ret = translate_table(table->name, table->valid_hooks,
2078 newinfo, loc_cpu_entry, repl->size,
2083 xt_free_table_info(newinfo);
2087 ret = xt_register_table(table, &bootstrap, newinfo);
2089 xt_free_table_info(newinfo);
2096 void ipt_unregister_table(struct xt_table *table)
2098 struct xt_table_info *private;
2099 void *loc_cpu_entry;
2101 private = xt_unregister_table(table);
2103 /* Decrease module usage counts and free resources */
2104 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2105 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2106 xt_free_table_info(private);
2109 /* Returns 1 if the type and code are matched by the range, 0 otherwise */
2111 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2112 u_int8_t type, u_int8_t code,
2115 return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
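/* A test_type of 0xFF acts as the "any ICMP type" wildcard; otherwise the
 * type must match exactly and the code must lie within [min_code, max_code].
 * The result is then flipped if the rule carries IPT_ICMP_INV. */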
2120 icmp_match(const struct sk_buff *skb,
2121 const struct net_device *in,
2122 const struct net_device *out,
2123 const struct xt_match *match,
2124 const void *matchinfo,
2126 unsigned int protoff,
2129 struct icmphdr _icmph, *ic;
2130 const struct ipt_icmp *icmpinfo = matchinfo;
2132 /* Must not be a fragment. */
2136 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2138 /* We've been asked to examine this packet, and we
2139 * can't. Hence, no choice but to drop.
2141 duprintf("Dropping evil ICMP tinygram.\n");
2146 return icmp_type_code_match(icmpinfo->type,
2150 !!(icmpinfo->invflags&IPT_ICMP_INV));
2153 /* Called when user tries to insert an entry of this type. */
2155 icmp_checkentry(const char *tablename,
2157 const struct xt_match *match,
2159 unsigned int hook_mask)
2161 const struct ipt_icmp *icmpinfo = matchinfo;
2163 /* Must specify no unknown invflags */
2164 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2167 /* The built-in targets: standard (NULL) and error. */
2168 static struct xt_target ipt_standard_target __read_mostly = {
2169 .name = IPT_STANDARD_TARGET,
2170 .targetsize = sizeof(int),
2172 #ifdef CONFIG_COMPAT
2173 .compatsize = sizeof(compat_int_t),
2174 .compat_from_user = compat_standard_from_user,
2175 .compat_to_user = compat_standard_to_user,
2179 static struct xt_target ipt_error_target __read_mostly = {
2180 .name = IPT_ERROR_TARGET,
2181 .target = ipt_error,
2182 .targetsize = IPT_FUNCTION_MAXNAMELEN,
2186 static struct nf_sockopt_ops ipt_sockopts = {
2188 .set_optmin = IPT_BASE_CTL,
2189 .set_optmax = IPT_SO_SET_MAX+1,
2190 .set = do_ipt_set_ctl,
2191 #ifdef CONFIG_COMPAT
2192 .compat_set = compat_do_ipt_set_ctl,
2194 .get_optmin = IPT_BASE_CTL,
2195 .get_optmax = IPT_SO_GET_MAX+1,
2196 .get = do_ipt_get_ctl,
2197 #ifdef CONFIG_COMPAT
2198 .compat_get = compat_do_ipt_get_ctl,
2200 .owner = THIS_MODULE,
2203 static struct xt_match icmp_matchstruct __read_mostly = {
2205 .match = icmp_match,
2206 .matchsize = sizeof(struct ipt_icmp),
2207 .proto = IPPROTO_ICMP,
2209 .checkentry = icmp_checkentry,
2212 static int __init ip_tables_init(void)
2216 ret = xt_proto_init(AF_INET);
2220 /* No one else will be downing the sem now, so we won't sleep */
2221 ret = xt_register_target(&ipt_standard_target);
2224 ret = xt_register_target(&ipt_error_target);
2227 ret = xt_register_match(&icmp_matchstruct);
2231 /* Register setsockopt */
2232 ret = nf_register_sockopt(&ipt_sockopts);
2236 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2240 xt_unregister_match(&icmp_matchstruct);
2242 xt_unregister_target(&ipt_error_target);
2244 xt_unregister_target(&ipt_standard_target);
2246 xt_proto_fini(AF_INET);
2251 static void __exit ip_tables_fini(void)
2253 nf_unregister_sockopt(&ipt_sockopts);
2255 xt_unregister_match(&icmp_matchstruct);
2256 xt_unregister_target(&ipt_error_target);
2257 xt_unregister_target(&ipt_standard_target);
2259 xt_proto_fini(AF_INET);
2262 EXPORT_SYMBOL(ipt_register_table);
2263 EXPORT_SYMBOL(ipt_unregister_table);
2264 EXPORT_SYMBOL(ipt_do_table);
2265 module_init(ip_tables_init);
2266 module_exit(ip_tables_fini);