2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #include <linux/cache.h>
12 #include <linux/capability.h>
13 #include <linux/skbuff.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/netdevice.h>
17 #include <linux/module.h>
18 #include <linux/icmp.h>
20 #include <net/compat.h>
21 #include <asm/uaccess.h>
22 #include <linux/mutex.h>
23 #include <linux/proc_fs.h>
24 #include <linux/err.h>
25 #include <linux/cpumask.h>
27 #include <linux/netfilter/x_tables.h>
28 #include <linux/netfilter_ipv4/ip_tables.h>
30 MODULE_LICENSE("GPL");
31 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
32 MODULE_DESCRIPTION("IPv4 packet filter");
34 /*#define DEBUG_IP_FIREWALL*/
35 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
36 /*#define DEBUG_IP_FIREWALL_USER*/
38 #ifdef DEBUG_IP_FIREWALL
39 #define dprintf(format, args...) printk(format , ## args)
41 #define dprintf(format, args...)
44 #ifdef DEBUG_IP_FIREWALL_USER
45 #define duprintf(format, args...) printk(format , ## args)
47 #define duprintf(format, args...)
50 #ifdef CONFIG_NETFILTER_DEBUG
51 #define IP_NF_ASSERT(x) \
54 printk("IP_NF_ASSERT: %s:%s:%u\n", \
55 __FUNCTION__, __FILE__, __LINE__); \
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
68 We keep a set of rules for each CPU, so we can avoid write-locking
69 them in the softirq when updating the counters and therefore
70 only need to read-lock in the softirq; doing a write_lock_bh() in user
71 context stops packets coming through and allows user context to read
72 the counters or update the rules.
74 Hence the start of any table is given by get_table() below. */
76 /* Returns whether matches rule or not. */
/*
 * ip_packet_match - test an IPv4 header against the address/interface/
 * protocol/fragment portion of one rule (struct ipt_ip), honouring the
 * IPT_INV_* inversion flags via FWINV().
 * NOTE(review): this extract is lossy -- the embedded original line numbers
 * show gaps (return statements, braces, some parameters are missing here);
 * verify any change against the complete upstream function.
 */
78 ip_packet_match(const struct iphdr *ip,
81 		const struct ipt_ip *ipinfo,
/* XOR the raw test with the rule's inversion bit: true == mismatch. */
87 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
/* Source/destination address match, each under its configured netmask. */
89 	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
91 	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
93 		dprintf("Source or dest mismatch.\n");
95 		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
97 			NIPQUAD(ipinfo->smsk.s_addr),
98 			NIPQUAD(ipinfo->src.s_addr),
99 			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
100 		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
102 			NIPQUAD(ipinfo->dmsk.s_addr),
103 			NIPQUAD(ipinfo->dst.s_addr),
104 			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
108 	/* Look for ifname matches; this should unroll nicely. */
/* Interface names are compared one unsigned long at a time under a mask,
 * which assumes IFNAMSIZ-sized, long-aligned buffers. */
109 	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
110 		ret |= (((const unsigned long *)indev)[i]
111 			^ ((const unsigned long *)ipinfo->iniface)[i])
112 			& ((const unsigned long *)ipinfo->iniface_mask)[i];
115 	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
116 		dprintf("VIA in mismatch (%s vs %s).%s\n",
117 			indev, ipinfo->iniface,
118 			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
/* Same masked comparison for the outgoing interface. */
122 	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
123 		ret |= (((const unsigned long *)outdev)[i]
124 			^ ((const unsigned long *)ipinfo->outiface)[i])
125 			& ((const unsigned long *)ipinfo->outiface_mask)[i];
128 	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
129 		dprintf("VIA out mismatch (%s vs %s).%s\n",
130 			outdev, ipinfo->outiface,
131 			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
135 	/* Check specific protocol */
137 	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
138 		dprintf("Packet protocol %hi does not match %hi.%s\n",
139 			ip->protocol, ipinfo->proto,
140 			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
144 	/* If we have a fragment rule but the packet is not a fragment
145 	 * then we return zero */
146 	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
147 		dprintf("Fragment rule but not fragment.%s\n",
148 			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
/*
 * ip_checkentry - sanity-check the user-supplied ipt_ip part of a rule.
 * Rejects entries with unknown bits set in flags or invflags.
 * NOTE(review): lossy extract -- return statements and braces are missing
 * here (numbering gaps); verify against the upstream function.
 */
156 ip_checkentry(const struct ipt_ip *ip)
158 	if (ip->flags & ~IPT_F_MASK) {
159 		duprintf("Unknown flag bits set: %08X\n",
160 			 ip->flags & ~IPT_F_MASK);
163 	if (ip->invflags & ~IPT_INV_MASK) {
164 		duprintf("Unknown invflag bits set: %08X\n",
165 			 ip->invflags & ~IPT_INV_MASK);
/*
 * ipt_error - target handler for the built-in ERROR target; only ever hit
 * if a rule with this target is actually traversed.  Logs the embedded
 * error string.  NOTE(review): the verdict return line is missing from
 * this extract (numbering gap).
 */
172 ipt_error(struct sk_buff *skb,
173 	  const struct net_device *in,
174 	  const struct net_device *out,
175 	  unsigned int hooknum,
176 	  const struct xt_target *target,
177 	  const void *targinfo)
180 	printk("ip_tables: error: `%s'\n", (char *)targinfo);
/*
 * do_match - run one match extension against a packet; used as the
 * IPT_MATCH_ITERATE callback from ipt_do_table().  Returns nonzero to stop
 * iteration when the match fails (or the match sets *hotdrop).
 * NOTE(review): some parameters and the return statements are missing in
 * this extract (numbering gaps).
 */
186 bool do_match(struct ipt_entry_match *m,
187 	      const struct sk_buff *skb,
188 	      const struct net_device *in,
189 	      const struct net_device *out,
193 	/* Stop iteration if it doesn't match */
194 	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
195 				      offset, ip_hdrlen(skb), hotdrop))
/*
 * get_entry - return the ipt_entry located @offset bytes into the rule
 * blob starting at @base.  Offsets are the on-the-wire/in-table entry
 * offsets used throughout this file (hook_entry, underflow, verdicts).
 * (Reconstructed: the surrounding braces were missing from the extract.)
 */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
207 /* All zeroes == unconditional rule. */
209 unconditional(const struct ipt_ip *ip)
213 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
214 if (((__u32 *)ip)[i])
220 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
221     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* Human-readable hook names used when logging TRACEd packets. */
222 static const char *hooknames[] = {
223 	[NF_INET_PRE_ROUTING]	= "PREROUTING",
224 	[NF_INET_LOCAL_IN]	= "INPUT",
225 	[NF_INET_FORWARD]	= "FORWARD",
226 	[NF_INET_LOCAL_OUT]	= "OUTPUT",
227 	[NF_INET_POST_ROUTING]	= "POSTROUTING",
/* Which kind of rule terminated the trace walk (see comments[] below). */
230 enum nf_ip_trace_comments {
231 	NF_IP_TRACE_COMMENT_RULE,
232 	NF_IP_TRACE_COMMENT_RETURN,
233 	NF_IP_TRACE_COMMENT_POLICY,
236 static const char *comments[] = {
237 	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
238 	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
239 	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
/* Log settings for TRACE output; NOTE(review): some initializer lines are
 * missing from this extract (numbering gaps). */
242 static struct nf_loginfo trace_loginfo = {
243 	.type = NF_LOG_TYPE_LOG,
247 			.logflags = NF_LOG_MASK,
/*
 * get_chainname_rulenum - IPT_ENTRY_ITERATE callback used by trace_packet()
 * to work out, for entry *e, which chain it lives in and its rule number.
 * An ERROR target marks the head of a user chain (its data holds the
 * chain name); an unconditional STANDARD target at target_offset ==
 * sizeof(struct ipt_entry) is a chain tail (policy or return).
 * NOTE(review): return statements and rulenum updates are missing from
 * this extract (numbering gaps).
 */
253 get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
254 		      char *hookname, char **chainname,
255 		      char **comment, unsigned int *rulenum)
257 	struct ipt_standard_target *t = (void *)ipt_get_target(s);
259 	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
260 		/* Head of user chain: ERROR target with chainname */
261 		*chainname = t->target.data;
266 	} else if (s->target_offset == sizeof(struct ipt_entry)
267 		   && strcmp(t->target.u.kernel.target->name,
268 			     IPT_STANDARD_TARGET) == 0
270 		   && unconditional(&s->ip)) {
271 		/* Tail of chains: STANDARD target (return/policy) */
/* In the hook's own chain a tail is the policy; in a user chain it is a
 * return. */
272 		*comment = *chainname == hookname
273 			? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
274 			: (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
/*
 * trace_packet - emit a "TRACE: table:chain:comment:rulenum" log line for a
 * packet marked with skb->nf_trace.  Walks this hook's rules from the
 * per-CPU copy to locate the chain/rule that entry e belongs to.
 * NOTE(review): some parameters (hook, tablename, e) and locals are missing
 * from this extract (numbering gaps).
 */
283 static void trace_packet(struct sk_buff *skb,
285 			 const struct net_device *in,
286 			 const struct net_device *out,
288 			 struct xt_table_info *private,
292 	struct ipt_entry *root;
293 	char *hookname, *chainname, *comment;
294 	unsigned int rulenum = 0;
/* Use this CPU's copy of the table; caller context prevents it changing. */
296 	table_base = (void *)private->entries[smp_processor_id()];
297 	root = get_entry(table_base, private->hook_entry[hook]);
299 	hookname = chainname = (char *)hooknames[hook];
300 	comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];
302 	IPT_ENTRY_ITERATE(root,
303 			  private->size - private->hook_entry[hook],
304 			  get_chainname_rulenum,
305 			  e, hookname, &chainname, &comment, &rulenum);
307 	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
308 		      "TRACE: %s:%s:%s:%u ",
309 		      tablename, chainname, comment, rulenum);
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * ipt_do_table - main per-packet rule traversal for one table at one hook.
 * Walks entries from the hook's entry point, evaluating matches and
 * targets; maintains a one-deep "back" pointer for returns from user
 * chains (deeper returns are chained through the pcnt scratch space set up
 * by mark_source_chains()).  Runs under read_lock_bh(&table->lock) against
 * this CPU's private copy of the rules.
 * NOTE(review): this extract is lossy -- the hook parameter, several
 * locals, loop framing, else branches and the final return are missing
 * (numbering gaps); treat line order here as a sample, not the full body.
 */
315 ipt_do_table(struct sk_buff *skb,
317 	     const struct net_device *in,
318 	     const struct net_device *out,
319 	     struct xt_table *table)
321 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
325 	bool hotdrop = false;
326 	/* Initializing verdict to NF_DROP keeps gcc happy. */
327 	unsigned int verdict = NF_DROP;
328 	const char *indev, *outdev;
330 	struct ipt_entry *e, *back;
331 	struct xt_table_info *private;
335 	datalen = skb->len - ip->ihl * 4;
336 	indev = in ? in->name : nulldevname;
337 	outdev = out ? out->name : nulldevname;
338 	/* We handle fragments by dealing with the first fragment as
339 	 * if it was a normal packet.  All other fragments are treated
340 	 * normally, except that they will NEVER match rules that ask
341 	 * things we don't know, ie. tcp syn flag or ports).  If the
342 	 * rule is also a fragment-specific rule, non-fragments won't
344 	offset = ntohs(ip->frag_off) & IP_OFFSET;
346 	read_lock_bh(&table->lock);
347 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
348 	private = table->private;
/* Per-CPU rule copy: counters can be updated without write-locking. */
349 	table_base = (void *)private->entries[smp_processor_id()];
350 	e = get_entry(table_base, private->hook_entry[hook]);
352 	/* For return from builtin chain */
353 	back = get_entry(table_base, private->underflow[hook]);
358 		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
359 			struct ipt_entry_target *t;
361 			if (IPT_MATCH_ITERATE(e, do_match,
363 					      offset, &hotdrop) != 0)
366 			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
368 			t = ipt_get_target(e);
369 			IP_NF_ASSERT(t->u.kernel.target);
371 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
372     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
373 			/* The packet is traced: log it */
374 			if (unlikely(skb->nf_trace))
375 				trace_packet(skb, hook, in, out,
376 					     table->name, private, e);
378 			/* Standard target? */
379 			if (!t->u.kernel.target->target) {
382 				v = ((struct ipt_standard_target *)t)->verdict;
384 				/* Pop from stack? */
385 				if (v != IPT_RETURN) {
/* Negative verdicts encode NF_* values as -verdict - 1. */
386 					verdict = (unsigned)(-v) - 1;
390 					back = get_entry(table_base,
394 				if (table_base + v != (void *)e + e->next_offset
395 				    && !(e->ip.flags & IPT_F_GOTO)) {
396 					/* Save old back ptr in next entry */
397 					struct ipt_entry *next
398 						= (void *)e + e->next_offset;
400 						= (void *)back - table_base;
401 					/* set back pointer to next entry */
405 				e = get_entry(table_base, v);
407 				/* Targets which reenter must return
409 #ifdef CONFIG_NETFILTER_DEBUG
410 				((struct ipt_entry *)table_base)->comefrom
413 				verdict = t->u.kernel.target->target(skb,
419 #ifdef CONFIG_NETFILTER_DEBUG
420 				if (((struct ipt_entry *)table_base)->comefrom
422 				    && verdict == IPT_CONTINUE) {
423 					printk("Target %s reentered!\n",
424 					       t->u.kernel.target->name);
427 				((struct ipt_entry *)table_base)->comefrom
430 				/* Target might have changed stuff. */
432 				datalen = skb->len - ip->ihl * 4;
434 				if (verdict == IPT_CONTINUE)
435 					e = (void *)e + e->next_offset;
443 			e = (void *)e + e->next_offset;
447 	read_unlock_bh(&table->lock);
449 #ifdef DEBUG_ALLOW_ALL
458 /* Figures out from what hook each rule can be called: returns 0 if
459    there are loops.  Puts hook bitmask in comefrom. */
/*
 * mark_source_chains - depth-first walk over the rule blob from each valid
 * hook entry point, recording in e->comefrom which hooks can reach each
 * rule and detecting chain loops.  Uses e->counters.pcnt as a temporary
 * back-pointer stack (restored to 0 on the way back out) and bit
 * NF_INET_NUMHOOKS of comefrom as the "currently on the walk stack" mark.
 * NOTE(review): lossy extract -- loop framing, else branches and several
 * returns are missing (numbering gaps); verify changes against upstream.
 */
461 mark_source_chains(struct xt_table_info *newinfo,
462 		   unsigned int valid_hooks, void *entry0)
466 	/* No recursion; use packet counter to save back ptrs (reset
467 	   to 0 as we leave), and comefrom to save source hook bitmask */
468 	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
469 		unsigned int pos = newinfo->hook_entry[hook];
470 		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);
472 		if (!(valid_hooks & (1 << hook)))
475 		/* Set initial back pointer. */
476 		e->counters.pcnt = pos;
479 			struct ipt_standard_target *t
480 				= (void *)ipt_get_target(e);
481 			int visited = e->comefrom & (1 << hook);
/* Seeing the on-stack mark again means the chains form a cycle. */
483 			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
484 				printk("iptables: loop hook %u pos %u %08X.\n",
485 				       hook, pos, e->comefrom);
488 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
490 			/* Unconditional return/END. */
491 			if ((e->target_offset == sizeof(struct ipt_entry)
492 			     && (strcmp(t->target.u.user.name,
493 					IPT_STANDARD_TARGET) == 0)
495 			     && unconditional(&e->ip)) || visited) {
496 				unsigned int oldpos, size;
498 				if (t->verdict < -NF_MAX_VERDICT - 1) {
499 					duprintf("mark_source_chains: bad "
500 						"negative verdict (%i)\n",
505 				/* Return: backtrack through the last
508 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
509 #ifdef DEBUG_IP_FIREWALL_USER
511 					    & (1 << NF_INET_NUMHOOKS)) {
512 						duprintf("Back unset "
/* Pop the saved back pointer and clear the scratch slot. */
519 					pos = e->counters.pcnt;
520 					e->counters.pcnt = 0;
522 					/* We're at the start. */
526 					e = (struct ipt_entry *)
528 				} while (oldpos == pos + e->next_offset);
531 				size = e->next_offset;
532 				e = (struct ipt_entry *)
533 					(entry0 + pos + size);
534 				e->counters.pcnt = pos;
537 				int newpos = t->verdict;
539 				if (strcmp(t->target.u.user.name,
540 					   IPT_STANDARD_TARGET) == 0
/* A non-negative standard verdict is an absolute jump offset; bound it. */
542 					if (newpos > newinfo->size -
543 						sizeof(struct ipt_entry)) {
544 						duprintf("mark_source_chains: "
545 							"bad verdict (%i)\n",
549 					/* This a jump; chase it. */
550 					duprintf("Jump rule %u -> %u\n",
553 					/* ... this is a fallthru */
554 					newpos = pos + e->next_offset;
556 				e = (struct ipt_entry *)
558 				e->counters.pcnt = pos;
563 		duprintf("Finished chain %u\n", hook);
/*
 * cleanup_match - IPT_MATCH_ITERATE callback: release one match extension
 * (call its destroy hook and drop the module reference).  With a non-NULL
 * counter *i it stops after *i matches (used to unwind partial setup).
 * NOTE(review): return statements are missing from this extract.
 */
569 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
571 	if (i && (*i)-- == 0)
574 	if (m->u.kernel.match->destroy)
575 		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
576 	module_put(m->u.kernel.match->me);
/*
 * check_entry - basic structural validation of one user-supplied rule:
 * the ipt_ip part must be sane and target_offset/next_offset must leave
 * room for (and bound) the target.  NOTE(review): error returns are
 * missing from this extract (numbering gaps).
 */
581 check_entry(struct ipt_entry *e, const char *name)
583 	struct ipt_entry_target *t;
585 	if (!ip_checkentry(&e->ip)) {
586 		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
/* Target must fit between target_offset and next_offset. */
590 	if (e->target_offset + sizeof(struct ipt_entry_target) >
594 	t = ipt_get_target(e);
595 	if (e->target_offset + t->u.target_size > e->next_offset)
/*
 * check_match - validate an already-resolved match extension against this
 * rule: size/protocol/hook checks via xt_check_match(), then the match's
 * own checkentry hook.  NOTE(review): the trailing return and error paths
 * are missing from this extract (numbering gaps).
 */
601 static inline int check_match(struct ipt_entry_match *m, const char *name,
602 			      const struct ipt_ip *ip,
603 			      unsigned int hookmask, unsigned int *i)
605 	struct xt_match *match;
608 	match = m->u.kernel.match;
609 	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
610 			     name, hookmask, ip->proto,
611 			     ip->invflags & IPT_INV_PROTO);
612 	if (!ret && m->u.kernel.match->checkentry
613 	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
615 		duprintf("ip_tables: check failed for `%s'.\n",
616 			 m->u.kernel.match->name);
/*
 * find_check_match - resolve a match extension by name (auto-loading
 * "ipt_<name>" if necessary), store it in m->u.kernel.match, then validate
 * it with check_match().  On validation failure the module reference is
 * dropped again.  NOTE(review): some labels/returns are missing from this
 * extract (numbering gaps).
 */
625 find_check_match(struct ipt_entry_match *m,
627 		 const struct ipt_ip *ip,
628 		 unsigned int hookmask,
631 	struct xt_match *match;
634 	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
636 					"ipt_%s", m->u.user.name);
637 	if (IS_ERR(match) || !match) {
638 		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
639 		return match ? PTR_ERR(match) : -ENOENT;
641 	m->u.kernel.match = match;
643 	ret = check_match(m, name, ip, hookmask, i);
/* Error path: undo the reference taken by xt_find_match(). */
649 	module_put(m->u.kernel.match->me);
/*
 * check_target - validate the already-resolved target of rule e:
 * size/protocol/hook checks via xt_check_target(), then the target's own
 * checkentry hook.  NOTE(review): the trailing return is missing from this
 * extract (numbering gaps).
 */
653 static inline int check_target(struct ipt_entry *e, const char *name)
655 	struct ipt_entry_target *t;
656 	struct xt_target *target;
659 	t = ipt_get_target(e);
660 	target = t->u.kernel.target;
661 	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
662 			      name, e->comefrom, e->ip.proto,
663 			      e->ip.invflags & IPT_INV_PROTO);
664 	if (!ret && t->u.kernel.target->checkentry
665 	    && !t->u.kernel.target->checkentry(name, e, target, t->data,
667 		duprintf("ip_tables: check failed for `%s'.\n",
668 			 t->u.kernel.target->name);
/*
 * find_check_entry - fully validate one rule: structural check, resolve and
 * check every match (find_check_match), resolve the target by name
 * (auto-loading "ipt_<name>"), then check_target().  On failure, unwinds
 * matches already set up via cleanup_match with counter j.
 * NOTE(review): labels and returns are missing from this extract.
 */
675 find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
678 	struct ipt_entry_target *t;
679 	struct xt_target *target;
683 	ret = check_entry(e, name);
/* j counts matches successfully checked, for partial unwind below. */
688 	ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
691 		goto cleanup_matches;
693 	t = ipt_get_target(e);
694 	target = try_then_request_module(xt_find_target(AF_INET,
697 					 "ipt_%s", t->u.user.name);
698 	if (IS_ERR(target) || !target) {
699 		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
700 		ret = target ? PTR_ERR(target) : -ENOENT;
701 		goto cleanup_matches;
703 	t->u.kernel.target = target;
705 	ret = check_target(e, name);
/* Error paths: drop target module ref, then release the checked matches. */
712 	module_put(t->u.kernel.target->me);
714 	IPT_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * check_entry_size_and_hooks - first-pass walk callback over the raw rule
 * blob: verify alignment and bounds of each entry, record hook entry and
 * underflow offsets that land exactly on an entry boundary, and clear the
 * counters/comefrom scratch fields.  NOTE(review): several parameters,
 * bounds checks and the entry counter are missing from this extract.
 */
719 check_entry_size_and_hooks(struct ipt_entry *e,
720 			   struct xt_table_info *newinfo,
722 			   unsigned char *limit,
723 			   const unsigned int *hook_entries,
724 			   const unsigned int *underflows,
729 	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
730 	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
731 		duprintf("Bad offset %p\n", e);
736 	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
737 		duprintf("checking: element %p size %u\n",
742 	/* Check hooks & underflows */
743 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
744 		if ((unsigned char *)e - base == hook_entries[h])
745 			newinfo->hook_entry[h] = hook_entries[h];
746 		if ((unsigned char *)e - base == underflows[h])
747 			newinfo->underflow[h] = underflows[h];
750 	/* FIXME: underflows must be unconditional, standard verdicts
751 	   < 0 (not IPT_RETURN). --RR */
753 	/* Clear counters and comefrom */
754 	e->counters = ((struct xt_counters) { 0, 0 });
/*
 * cleanup_entry - release all resources held by one rule: every match
 * (cleanup_match), then the target's destroy hook and module reference.
 * With a non-NULL counter *i it stops after *i entries.
 * NOTE(review): return statements are missing from this extract.
 */
762 cleanup_entry(struct ipt_entry *e, unsigned int *i)
764 	struct ipt_entry_target *t;
766 	if (i && (*i)-- == 0)
769 	/* Cleanup all matches */
770 	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
771 	t = ipt_get_target(e);
772 	if (t->u.kernel.target->destroy)
773 		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
774 	module_put(t->u.kernel.target->me);
778 /* Checks and translates the user-supplied table segment (held in
/*
 * translate_table - validate a complete user-supplied rule blob and turn
 * it into a live xt_table_info: per-entry size/offset checks, hook
 * assignment, loop detection (mark_source_chains), per-entry extension
 * checks (find_check_entry), then replication to every CPU's copy.
 * NOTE(review): lossy extract -- several parameters, error returns and the
 * final return are missing (numbering gaps).
 */
781 translate_table(const char *name,
782 		unsigned int valid_hooks,
783 		struct xt_table_info *newinfo,
787 		const unsigned int *hook_entries,
788 		const unsigned int *underflows)
793 	newinfo->size = size;
794 	newinfo->number = number;
796 	/* Init all hooks to impossible value. */
797 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
798 		newinfo->hook_entry[i] = 0xFFFFFFFF;
799 		newinfo->underflow[i] = 0xFFFFFFFF;
802 	duprintf("translate_table: size %u\n", newinfo->size);
804 	/* Walk through entries, checking offsets. */
805 	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
806 				check_entry_size_and_hooks,
810 				hook_entries, underflows, &i);
/* The walk must have visited exactly the claimed number of entries. */
815 		duprintf("translate_table: %u not %u entries\n",
820 	/* Check hooks all assigned */
821 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
822 		/* Only hooks which are valid */
823 		if (!(valid_hooks & (1 << i)))
825 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
826 			duprintf("Invalid hook entry %u %u\n",
830 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
831 			duprintf("Invalid underflow %u %u\n",
837 	if (!mark_source_chains(newinfo, valid_hooks, entry0))
840 	/* Finally, each sanity check must pass */
842 	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
843 				find_check_entry, name, size, &i);
/* On failure, unwind the i entries that did pass. */
846 		IPT_ENTRY_ITERATE(entry0, newinfo->size,
851 	/* And one copy for every other CPU */
852 	for_each_possible_cpu(i) {
853 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
854 			memcpy(newinfo->entries[i], entry0, newinfo->size);
/*
 * add_entry_to_counter - walk callback: accumulate one entry's byte/packet
 * counters into total[*i] (ADD = sum across CPUs).
 * NOTE(review): the index increment/return lines are missing here.
 */
862 add_entry_to_counter(const struct ipt_entry *e,
863 		     struct xt_counters total[],
866 	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/*
 * set_entry_to_counter - walk callback: initialise total[*i] from one
 * entry's counters (SET = first CPU's values, others are added later).
 * NOTE(review): the index increment/return lines are missing here.
 */
873 set_entry_to_counter(const struct ipt_entry *e,
874 		     struct ipt_counters total[],
877 	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/*
 * get_counters - collapse the per-CPU entry counters of table t into the
 * single counters[] array: SET from the current CPU's copy, then ADD every
 * other CPU's copy.  NOTE(review): locals and the skip-current-cpu check
 * inside the loop are missing from this extract (numbering gaps).
 */
884 get_counters(const struct xt_table_info *t,
885 	     struct xt_counters counters[])
891 	/* Instead of clearing (by a previous call to memset())
892 	 * the counters and using adds, we set the counters
893 	 * with data used by 'current' CPU
894 	 * We dont care about preemption here.
896 	curcpu = raw_smp_processor_id();
899 	IPT_ENTRY_ITERATE(t->entries[curcpu],
901 			  set_entry_to_counter,
905 	for_each_possible_cpu(cpu) {
909 		IPT_ENTRY_ITERATE(t->entries[cpu],
911 				  add_entry_to_counter,
/*
 * alloc_counters - vmalloc a counter array sized for every rule in the
 * table and fill it with an atomic snapshot of the per-CPU counters,
 * summed under the table write lock.  Returns the array or ERR_PTR(-ENOMEM).
 * Caller owns (and must vfree) the returned memory.
 * NOTE(review): the trailing return is missing from this extract.
 */
917 static inline struct xt_counters * alloc_counters(struct xt_table *table)
919 	unsigned int countersize;
920 	struct xt_counters *counters;
921 	struct xt_table_info *private = table->private;
923 	/* We need atomic snapshot of counters: rest doesn't change
924 	   (other than comefrom, which userspace doesn't care
926 	countersize = sizeof(struct xt_counters) * private->number;
927 	counters = vmalloc_node(countersize, numa_node_id());
929 	if (counters == NULL)
930 		return ERR_PTR(-ENOMEM);
932 	/* First, sum counters... */
/* Write lock keeps packet processing out while the snapshot is taken. */
933 	write_lock_bh(&table->lock);
934 	get_counters(private, counters);
935 	write_unlock_bh(&table->lock);
/*
 * copy_entries_to_user - export the table's rules to userspace: bulk-copy
 * this CPU's rule blob, then patch in the summed counters and rewrite the
 * kernel-side match/target pointers back into user-visible extension
 * names.  NOTE(review): lossy extract -- error labels, loop internals and
 * the final return/free are missing (numbering gaps).
 */
941 copy_entries_to_user(unsigned int total_size,
942 		     struct xt_table *table,
943 		     void __user *userptr)
945 	unsigned int off, num;
947 	struct xt_counters *counters;
948 	struct xt_table_info *private = table->private;
952 	counters = alloc_counters(table);
953 	if (IS_ERR(counters))
954 		return PTR_ERR(counters);
956 	/* choose the copy that is on our node/cpu, ...
957 	 * This choice is lazy (because current thread is
958 	 * allowed to migrate to another cpu)
960 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
961 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
966 	/* FIXME: use iterator macros --RR */
967 	/* ... then go back and fix counters and names */
968 	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
970 		struct ipt_entry_match *m;
971 		struct ipt_entry_target *t;
973 		e = (struct ipt_entry *)(loc_cpu_entry + off);
/* Overwrite the raw per-CPU counters with the summed snapshot. */
974 		if (copy_to_user(userptr + off
975 				 + offsetof(struct ipt_entry, counters),
977 				 sizeof(counters[num])) != 0) {
982 		for (i = sizeof(struct ipt_entry);
983 		     i < e->target_offset;
984 		     i += m->u.match_size) {
/* Replace kernel match pointer with the extension's NUL-terminated name. */
987 			if (copy_to_user(userptr + off + i
988 					 + offsetof(struct ipt_entry_match,
990 					 m->u.kernel.match->name,
991 					 strlen(m->u.kernel.match->name)+1)
998 		t = ipt_get_target(e);
999 		if (copy_to_user(userptr + off + e->target_offset
1000 				 + offsetof(struct ipt_entry_target,
1002 				 t->u.kernel.target->name,
1003 				 strlen(t->u.kernel.target->name)+1) != 0) {
1014 #ifdef CONFIG_COMPAT
1015 static void compat_standard_from_user(void *dst, void *src)
1017 int v = *(compat_int_t *)src;
1020 v += xt_compat_calc_jump(AF_INET, v);
1021 memcpy(dst, &v, sizeof(v));
1024 static int compat_standard_to_user(void __user *dst, void *src)
1026 compat_int_t cv = *(int *)src;
1029 cv -= xt_compat_calc_jump(AF_INET, cv);
1030 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1034 compat_calc_match(struct ipt_entry_match *m, int *size)
1036 *size += xt_compat_match_offset(m->u.kernel.match);
/*
 * compat_calc_entry - compute, for one native rule, how much smaller its
 * 32-bit compat representation is (entry header + each match + target),
 * shrink newinfo->size accordingly, record the per-entry offset delta,
 * and shift any hook entry/underflow offsets that lie after this entry.
 * NOTE(review): the final return and some error handling are missing from
 * this extract (numbering gaps).
 */
1040 static int compat_calc_entry(struct ipt_entry *e,
1041 			     const struct xt_table_info *info,
1042 			     void *base, struct xt_table_info *newinfo)
1044 	struct ipt_entry_target *t;
1045 	unsigned int entry_offset;
1048 	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1049 	entry_offset = (void *)e - base;
1050 	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1051 	t = ipt_get_target(e);
1052 	off += xt_compat_target_offset(t->u.kernel.target);
1053 	newinfo->size -= off;
1054 	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
/* Hook offsets after this entry shrink by the same delta. */
1058 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1059 		if (info->hook_entry[i] &&
1060 		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
1061 			newinfo->hook_entry[i] -= off;
1062 		if (info->underflow[i] &&
1063 		    (e < (struct ipt_entry *)(base + info->underflow[i])))
1064 			newinfo->underflow[i] -= off;
/*
 * compat_table_info - fill newinfo with the compat-layout sizes/offsets of
 * an existing table by running compat_calc_entry over every rule of this
 * CPU's copy.  NOTE(review): the error return for NULL arguments is
 * missing from this extract (numbering gap).
 */
1069 static int compat_table_info(const struct xt_table_info *info,
1070 			     struct xt_table_info *newinfo)
1072 	void *loc_cpu_entry;
1074 	if (!newinfo || !info)
1077 	/* we dont care about newinfo->entries[] */
1078 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1079 	newinfo->initial_entries = 0;
1080 	loc_cpu_entry = info->entries[raw_smp_processor_id()];
1081 	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1082 				 compat_calc_entry, info, loc_cpu_entry,
/*
 * get_info - IPT_SO_GET_INFO handler: look up a table by (user-supplied,
 * forcibly NUL-terminated) name and copy its hook entries, underflows,
 * entry count and size back to userspace.  In compat mode the sizes and
 * offsets are first recomputed for the 32-bit layout.
 * NOTE(review): lossy extract -- compat branches, error returns and the
 * table unlock are missing (numbering gaps).
 */
1087 static int get_info(void __user *user, int *len, int compat)
1089 	char name[IPT_TABLE_MAXNAMELEN];
1093 	if (*len != sizeof(struct ipt_getinfo)) {
1094 		duprintf("length %u != %u\n", *len,
1095 			 (unsigned int)sizeof(struct ipt_getinfo));
1099 	if (copy_from_user(name, user, sizeof(name)) != 0)
/* Defensive: never trust userspace to terminate the name. */
1102 	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1103 #ifdef CONFIG_COMPAT
1105 		xt_compat_lock(AF_INET);
1107 	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1108 				    "iptable_%s", name);
1109 	if (t && !IS_ERR(t)) {
1110 		struct ipt_getinfo info;
1111 		struct xt_table_info *private = t->private;
1113 #ifdef CONFIG_COMPAT
1115 			struct xt_table_info tmp;
1116 			ret = compat_table_info(private, &tmp);
1117 			xt_compat_flush_offsets(AF_INET);
1121 		info.valid_hooks = t->valid_hooks;
1122 		memcpy(info.hook_entry, private->hook_entry,
1123 		       sizeof(info.hook_entry));
1124 		memcpy(info.underflow, private->underflow,
1125 		       sizeof(info.underflow));
1126 		info.num_entries = private->number;
1127 		info.size = private->size;
1128 		strcpy(info.name, name);
1130 		if (copy_to_user(user, &info, *len) != 0)
1138 		ret = t ? PTR_ERR(t) : -ENOENT;
1139 #ifdef CONFIG_COMPAT
1141 		xt_compat_unlock(AF_INET);
/*
 * get_entries - IPT_SO_GET_ENTRIES handler: after validating that the user
 * buffer length matches header + advertised rule-blob size, look up the
 * table and dump its rules via copy_entries_to_user().
 * NOTE(review): error returns, the -EAGAIN/-EINVAL paths and the table
 * unlock are missing from this extract (numbering gaps).
 */
1147 get_entries(struct ipt_get_entries __user *uptr, int *len)
1150 	struct ipt_get_entries get;
1153 	if (*len < sizeof(get)) {
1154 		duprintf("get_entries: %u < %d\n", *len,
1155 			 (unsigned int)sizeof(get));
1158 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1160 	if (*len != sizeof(struct ipt_get_entries) + get.size) {
1161 		duprintf("get_entries: %u != %u\n", *len,
1162 			 (unsigned int)(sizeof(struct ipt_get_entries) +
1167 	t = xt_find_table_lock(AF_INET, get.name);
1168 	if (t && !IS_ERR(t)) {
1169 		struct xt_table_info *private = t->private;
1170 		duprintf("t->private->number = %u\n", private->number);
/* Size must match exactly -- the table may have been replaced since
 * get_info. */
1171 		if (get.size == private->size)
1172 			ret = copy_entries_to_user(private->size,
1173 						   t, uptr->entrytable);
1175 			duprintf("get_entries: I've got %u not %u!\n",
1176 				 private->size, get.size);
1182 		ret = t ? PTR_ERR(t) : -ENOENT;
/*
 * __do_replace - swap a validated new rule set into table `name`: allocate
 * the counter snapshot buffer, find the table (auto-loading the
 * "iptable_<name>" module), verify the hook mask, atomically exchange the
 * xt_table_info via xt_replace_table(), adjust module refcounts, harvest
 * the old counters for userspace, and free the old table.
 * NOTE(review): lossy extract -- ENOMEM check, module get/put lines,
 * unlock paths and final returns are missing (numbering gaps).
 */
1188 __do_replace(const char *name, unsigned int valid_hooks,
1189 	     struct xt_table_info *newinfo, unsigned int num_counters,
1190 	     void __user *counters_ptr)
1194 	struct xt_table_info *oldinfo;
1195 	struct xt_counters *counters;
1196 	void *loc_cpu_old_entry;
1199 	counters = vmalloc(num_counters * sizeof(struct xt_counters));
1205 	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1206 				    "iptable_%s", name);
1207 	if (!t || IS_ERR(t)) {
1208 		ret = t ? PTR_ERR(t) : -ENOENT;
1209 		goto free_newinfo_counters_untrans;
1213 	if (valid_hooks != t->valid_hooks) {
1214 		duprintf("Valid hook crap: %08X vs %08X\n",
1215 			 valid_hooks, t->valid_hooks);
1220 	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1224 	/* Update module usage count based on number of rules */
1225 	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1226 		oldinfo->number, oldinfo->initial_entries, newinfo->number);
1227 	if ((oldinfo->number > oldinfo->initial_entries) ||
1228 	    (newinfo->number <= oldinfo->initial_entries))
1230 	if ((oldinfo->number > oldinfo->initial_entries) &&
1231 	    (newinfo->number <= oldinfo->initial_entries))
1234 	/* Get the old counters. */
1235 	get_counters(oldinfo, counters);
1236 	/* Decrease module usage counts and free resource */
1237 	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1238 	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1240 	xt_free_table_info(oldinfo);
1241 	if (copy_to_user(counters_ptr, counters,
1242 			 sizeof(struct xt_counters) * num_counters) != 0)
1251  free_newinfo_counters_untrans:
/*
 * do_replace - IPT_SO_SET_REPLACE handler: copy the ipt_replace header and
 * rule blob from userspace into a fresh xt_table_info, validate/translate
 * it (translate_table), then commit with __do_replace().  On any failure
 * the new blob is cleaned up and freed.
 * NOTE(review): ENOMEM handling, the copy_from_user error path and final
 * returns are missing from this extract (numbering gaps).
 */
1258 do_replace(void __user *user, unsigned int len)
1261 	struct ipt_replace tmp;
1262 	struct xt_table_info *newinfo;
1263 	void *loc_cpu_entry;
1265 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1268 	/* Hack: Causes ipchains to give correct error msg --RR */
1269 	if (len != sizeof(tmp) + tmp.size)
1270 		return -ENOPROTOOPT;
1272 	/* overflow check */
/* Guards the num_counters * sizeof() multiplication in __do_replace(). */
1273 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1276 	newinfo = xt_alloc_table_info(tmp.size);
1280 	/* choose the copy that is on our node/cpu */
1281 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1282 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1288 	ret = translate_table(tmp.name, tmp.valid_hooks,
1289 			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1290 			      tmp.hook_entry, tmp.underflow);
1294 	duprintf("ip_tables: Translated table\n");
1296 	ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1297 			   tmp.num_counters, tmp.counters);
1299 		goto free_newinfo_untrans;
1302  free_newinfo_untrans:
1303 	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1305 	xt_free_table_info(newinfo);
1309 /* We're lazy, and add to the first CPU; overflow works its fey magic
1310  * and everything is OK. */
/*
 * add_counter_to_entry - walk callback for do_add_counters(): add the
 * userspace-supplied byte/packet deltas addme[*i] onto rule e's counters.
 * NOTE(review): the index increment and return are missing from this
 * extract (numbering gaps).
 */
1312 add_counter_to_entry(struct ipt_entry *e,
1313 		     const struct xt_counters addme[],
1317 	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1319 		 (long unsigned int)e->counters.pcnt,
1320 		 (long unsigned int)e->counters.bcnt,
1321 		 (long unsigned int)addme[*i].pcnt,
1322 		 (long unsigned int)addme[*i].bcnt);
1325 	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/*
 * do_add_counters - IPT_SO_SET_ADD_COUNTERS handler (native and compat):
 * read the xt_counters_info header (32-bit layout when compat), copy in
 * the counter array, and add it onto the named table's rules under the
 * table write lock.  The counter count must match the rule count exactly.
 * NOTE(review): lossy extract -- compat header handling, several error
 * returns, unlock/free labels and the final return are missing.
 */
1332 do_add_counters(void __user *user, unsigned int len, int compat)
1335 	struct xt_counters_info tmp;
1336 	struct xt_counters *paddc;
1337 	unsigned int num_counters;
1342 	struct xt_table_info *private;
1344 	void *loc_cpu_entry;
1345 #ifdef CONFIG_COMPAT
1346 	struct compat_xt_counters_info compat_tmp;
/* Header size differs between native and 32-bit compat callers. */
1350 		size = sizeof(struct compat_xt_counters_info);
1355 		size = sizeof(struct xt_counters_info);
1358 	if (copy_from_user(ptmp, user, size) != 0)
1361 #ifdef CONFIG_COMPAT
1363 		num_counters = compat_tmp.num_counters;
1364 		name = compat_tmp.name;
1368 		num_counters = tmp.num_counters;
1372 	if (len != size + num_counters * sizeof(struct xt_counters))
1375 	paddc = vmalloc_node(len - size, numa_node_id());
1379 	if (copy_from_user(paddc, user + size, len - size) != 0) {
1384 	t = xt_find_table_lock(AF_INET, name);
1385 	if (!t || IS_ERR(t)) {
1386 		ret = t ? PTR_ERR(t) : -ENOENT;
1390 	write_lock_bh(&t->lock);
1391 	private = t->private;
1392 	if (private->number != num_counters) {
1394 		goto unlock_up_free;
1398 	/* Choose the copy that is on our node */
1399 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
1400 	IPT_ENTRY_ITERATE(loc_cpu_entry,
1402 			  add_counter_to_entry,
1406 	write_unlock_bh(&t->lock);
1415 #ifdef CONFIG_COMPAT
/* 32-bit userland's view of struct ipt_replace: same field order, but
 * compat-sized pointer for counters and compat-laid-out entries[].
 * NOTE(review): some scalar fields are missing from this extract. */
1416 struct compat_ipt_replace {
1417 	char			name[IPT_TABLE_MAXNAMELEN];
1421 	u32			hook_entry[NF_INET_NUMHOOKS];
1422 	u32			underflow[NF_INET_NUMHOOKS];
1424 	compat_uptr_t		counters;	/* struct ipt_counters * */
1425 	struct compat_ipt_entry entries[0];
/*
 * compat_copy_entry_to_user - serialise one native rule into the 32-bit
 * compat layout at *dstptr: entry header, the (already summed) counters,
 * each match and the target via the xt compat helpers, then fix up the
 * now-shrunken target_offset/next_offset in the copied header.
 * *dstptr and *size are advanced/shrunk as we go.
 * NOTE(review): error returns and the index increment are missing from
 * this extract (numbering gaps).
 */
1429 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1430 			  compat_uint_t *size, struct xt_counters *counters,
1433 	struct ipt_entry_target *t;
1434 	struct compat_ipt_entry __user *ce;
1435 	u_int16_t target_offset, next_offset;
1436 	compat_uint_t origsize;
1441 	ce = (struct compat_ipt_entry __user *)*dstptr;
1442 	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1445 	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1448 	*dstptr += sizeof(struct compat_ipt_entry);
1449 	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1451 	ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* Offsets shrink by however much the compat layout saved so far. */
1452 	target_offset = e->target_offset - (origsize - *size);
1455 	t = ipt_get_target(e);
1456 	ret = xt_compat_target_to_user(t, dstptr, size);
1460 	next_offset = e->next_offset - (origsize - *size);
1461 	if (put_user(target_offset, &ce->target_offset))
1463 	if (put_user(next_offset, &ce->next_offset))
/*
 * compat_find_calc_match - resolve one match extension by name
 * (auto-loading "ipt_<name>"), store it in m->u.kernel.match and add its
 * native-vs-compat size delta to *size.  NOTE(review): the counter
 * increment and final return are missing from this extract.
 */
1473 compat_find_calc_match(struct ipt_entry_match *m,
1475 		       const struct ipt_ip *ip,
1476 		       unsigned int hookmask,
1479 	struct xt_match *match;
1481 	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1482 						      m->u.user.revision),
1483 					"ipt_%s", m->u.user.name);
1484 	if (IS_ERR(match) || !match) {
1485 		duprintf("compat_check_calc_match: `%s' not found\n",
1487 		return match ? PTR_ERR(match) : -ENOENT;
1489 	m->u.kernel.match = match;
1490 	*size += xt_compat_match_offset(match);
1497 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1499 if (i && (*i)-- == 0)
1502 module_put(m->u.kernel.match->me);
/*
 * compat_release_entry - release module references held by one compat
 * rule: every match via compat_release_match, then the target.  With a
 * non-NULL counter *i it stops after *i entries.
 * NOTE(review): return statements are missing from this extract.
 */
1507 compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
1509 	struct ipt_entry_target *t;
1511 	if (i && (*i)-- == 0)
1514 	/* Cleanup all matches */
1515 	COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1516 	t = compat_ipt_get_target(e);
1517 	module_put(t->u.kernel.target->me);
/*
 * check_compat_entry_size_and_hooks - first-pass walk over a 32-bit compat
 * rule blob: verify alignment and bounds, structurally check the entry,
 * resolve every match (compat_find_calc_match) and the target while
 * accumulating the native-layout size delta, record it with
 * xt_compat_add_offset(), note hook/underflow boundaries, and clear the
 * counters scratch space.  Unwinds match references on failure.
 * NOTE(review): lossy extract -- some parameters, size bookkeeping and
 * labels/returns are missing (numbering gaps).
 */
1522 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1523 				  struct xt_table_info *newinfo,
1525 				  unsigned char *base,
1526 				  unsigned char *limit,
1527 				  unsigned int *hook_entries,
1528 				  unsigned int *underflows,
1532 	struct ipt_entry_target *t;
1533 	struct xt_target *target;
1534 	unsigned int entry_offset;
1537 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
1538 	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1539 	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1540 		duprintf("Bad offset %p, limit = %p\n", e, limit);
1544 	if (e->next_offset < sizeof(struct compat_ipt_entry) +
1545 			     sizeof(struct compat_xt_entry_target)) {
1546 		duprintf("checking: element %p size %u\n",
1551 	/* For purposes of check_entry casting the compat entry is fine */
1552 	ret = check_entry((struct ipt_entry *)e, name);
1556 	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1557 	entry_offset = (void *)e - (void *)base;
1559 	ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
1560 				       &e->ip, e->comefrom, &off, &j);
1562 		goto release_matches;
1564 	t = compat_ipt_get_target(e);
1565 	target = try_then_request_module(xt_find_target(AF_INET,
1567 							t->u.user.revision),
1568 					 "ipt_%s", t->u.user.name);
1569 	if (IS_ERR(target) || !target) {
1570 		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1572 		ret = target ? PTR_ERR(target) : -ENOENT;
1573 		goto release_matches;
1575 	t->u.kernel.target = target;
1577 	off += xt_compat_target_offset(target);
/* Remember how much this entry grows when converted to native layout. */
1579 	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1583 	/* Check hooks & underflows */
1584 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1585 		if ((unsigned char *)e - base == hook_entries[h])
1586 			newinfo->hook_entry[h] = hook_entries[h];
1587 		if ((unsigned char *)e - base == underflows[h])
1588 			newinfo->underflow[h] = underflows[h];
1591 	/* Clear counters and comefrom */
1592 	memset(&e->counters, 0, sizeof(e->counters));
/* Error path: drop target ref, then release the j matches resolved. */
1599 	module_put(t->u.kernel.target->me);
1601 	IPT_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * compat_copy_entry_from_user - translate one already-validated compat
 * entry at @e into a native ipt_entry at *dstptr, expanding matches and
 * target via the xt compat helpers, then fix up the copied entry's
 * offsets and the table's hook/underflow positions for the size growth.
 * (Original lines are elided in this excerpt.)
 */
1606 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1607 unsigned int *size, const char *name,
1608 struct xt_table_info *newinfo, unsigned char *base)
1610 struct ipt_entry_target *t;
1611 struct xt_target *target;
1612 struct ipt_entry *de;
1613 unsigned int origsize;
/* Copy the entry header, then the counters separately (their offset
 * differs between the compat and native layouts). */
1618 de = (struct ipt_entry *)*dstptr;
1619 memcpy(de, e, sizeof(struct ipt_entry));
1620 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1622 *dstptr += sizeof(struct ipt_entry);
1623 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1625 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
/* (origsize - *size) is the cumulative growth so far; shift the copied
 * entry's internal offsets by it. */
1629 de->target_offset = e->target_offset - (origsize - *size);
1630 t = compat_ipt_get_target(e);
1631 target = t->u.kernel.target;
1632 xt_compat_target_from_user(t, dstptr, size);
1634 de->next_offset = e->next_offset - (origsize - *size);
/* Any hook entry/underflow located after this entry moves by the same
 * growth amount. */
1635 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1636 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1637 newinfo->hook_entry[h] -= origsize - *size;
1638 if ((unsigned char *)de - base < newinfo->underflow[h])
1639 newinfo->underflow[h] -= origsize - *size;
/*
 * compat_check_entry - final checkentry pass over a translated (now
 * native-layout) entry: run each match's and the target's checkentry
 * hooks; on failure unwind the matches already checked.
 */
1644 static inline int compat_check_entry(struct ipt_entry *e, const char *name,
1650 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip,
1653 goto cleanup_matches;
1655 ret = check_target(e, name);
1657 goto cleanup_matches;
/* Error path: release the j matches that passed check_match. */
1663 IPT_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * translate_compat_table - convert a whole 32-bit userspace ruleset
 * into a native xt_table_info.  Two passes under the xt compat lock:
 * (1) size/hook validation that also records per-entry offset deltas,
 * (2) entry-by-entry translation into a freshly sized table, followed
 * by chain-loop marking, per-entry checkentry, and replication of the
 * translated blob to every possible CPU.
 * (Original lines, including braces/labels, are elided in this excerpt.)
 */
1668 translate_compat_table(const char *name,
1669 unsigned int valid_hooks,
1670 struct xt_table_info **pinfo,
1672 unsigned int total_size,
1673 unsigned int number,
1674 unsigned int *hook_entries,
1675 unsigned int *underflows)
1678 struct xt_table_info *newinfo, *info;
1679 void *pos, *entry0, *entry1;
1686 info->number = number;
1688 /* Init all hooks to impossible value. */
1689 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1690 info->hook_entry[i] = 0xFFFFFFFF;
1691 info->underflow[i] = 0xFFFFFFFF;
1694 duprintf("translate_compat_table: size %u\n", info->size);
/* The compat offset table is global per-AF; hold the compat lock for
 * the whole validate-and-translate sequence. */
1696 xt_compat_lock(AF_INET);
1697 /* Walk through entries, checking offsets. */
1698 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1699 check_compat_entry_size_and_hooks,
1700 info, &size, entry0,
1701 entry0 + total_size,
1702 hook_entries, underflows, &j, name);
1708 duprintf("translate_compat_table: %u not %u entries\n",
1713 /* Check hooks all assigned */
1714 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1715 /* Only hooks which are valid */
1716 if (!(valid_hooks & (1 << i)))
1718 if (info->hook_entry[i] == 0xFFFFFFFF) {
1719 duprintf("Invalid hook entry %u %u\n",
1720 i, hook_entries[i]);
1723 if (info->underflow[i] == 0xFFFFFFFF) {
1724 duprintf("Invalid underflow %u %u\n",
/* Allocate the native-sized table and carry over the (already
 * position-corrected) hook entry points. */
1731 newinfo = xt_alloc_table_info(size);
1735 newinfo->number = number;
1736 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1737 newinfo->hook_entry[i] = info->hook_entry[i];
1738 newinfo->underflow[i] = info->underflow[i];
1740 entry1 = newinfo->entries[raw_smp_processor_id()];
1743 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1744 compat_copy_entry_from_user,
1745 &pos, &size, name, newinfo, entry1);
1746 xt_compat_flush_offsets(AF_INET);
1747 xt_compat_unlock(AF_INET);
1752 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1756 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Partial-failure unwind: release the compat entries not yet checked,
 * clean up the i native entries that were, and free the new table. */
1760 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1761 compat_release_entry, &j);
1762 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1763 xt_free_table_info(newinfo);
1767 /* And one copy for every other CPU */
1768 for_each_possible_cpu(i)
1769 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1770 memcpy(newinfo->entries[i], entry1, newinfo->size);
1774 xt_free_table_info(info);
/* Early-error paths below: free the new table / release compat entry
 * refs / drop the compat lock, depending on how far we got. */
1778 xt_free_table_info(newinfo);
1780 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1783 xt_compat_flush_offsets(AF_INET);
1784 xt_compat_unlock(AF_INET);
/*
 * compat_do_replace - IPT_SO_SET_REPLACE handler for 32-bit callers:
 * copy in the compat replace header and blob, translate it to native
 * layout, then install it via the common __do_replace path.
 */
1789 compat_do_replace(void __user *user, unsigned int len)
1792 struct compat_ipt_replace tmp;
1793 struct xt_table_info *newinfo;
1794 void *loc_cpu_entry;
1796 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1799 /* Hack: Causes ipchains to give correct error msg --RR */
1800 if (len != sizeof(tmp) + tmp.size)
1801 return -ENOPROTOOPT;
1803 /* overflow check */
1804 if (tmp.size >= INT_MAX / num_possible_cpus())
1806 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1809 newinfo = xt_alloc_table_info(tmp.size)
1813 /* choose the copy that is on our node/cpu */
1814 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1815 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1821 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1822 &newinfo, &loc_cpu_entry, tmp.size,
1823 tmp.num_entries, tmp.hook_entry,
1828 duprintf("compat_do_replace: Translated table\n");
/* tmp.counters is a compat (32-bit) pointer; widen it before handing
 * to the common replace path. */
1830 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1831 tmp.num_counters, compat_ptr(tmp.counters));
1833 goto free_newinfo_untrans;
/* __do_replace failed after translation: the (translated) entries hold
 * module refs that must be dropped before freeing the table. */
1836 free_newinfo_untrans:
1837 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1839 xt_free_table_info(newinfo);
/*
 * compat_do_ipt_set_ctl - setsockopt entry point for 32-bit userspace.
 * Requires CAP_NET_ADMIN; dispatches to the compat replace handler and
 * the (compat-aware) counter-add handler.
 */
1844 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1849 if (!capable(CAP_NET_ADMIN))
1853 case IPT_SO_SET_REPLACE:
1854 ret = compat_do_replace(user, len);
1857 case IPT_SO_SET_ADD_COUNTERS:
/* Third argument 1 = caller is a compat task. */
1858 ret = do_add_counters(user, len, 1);
1862 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
/* 32-bit layout of struct ipt_get_entries: table name, size, then a
 * flexible array of compat entries copied back to userspace. */
1869 struct compat_ipt_get_entries {
1870 char name[IPT_TABLE_MAXNAMELEN];
1872 struct compat_ipt_entry entrytable[0];
/*
 * compat_copy_entries_to_user - snapshot counters, then stream every
 * native entry out to @userptr in compat (shrunken) layout via
 * compat_copy_entry_to_user.
 */
1876 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1877 void __user *userptr)
1879 struct xt_counters *counters;
1880 struct xt_table_info *private = table->private;
1884 void *loc_cpu_entry;
1887 counters = alloc_counters(table);
1888 if (IS_ERR(counters))
1889 return PTR_ERR(counters);
1891 /* choose the copy that is on our node/cpu, ...
1892 * This choice is lazy (because current thread is
1893 * allowed to migrate to another cpu)
1895 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1898 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1899 compat_copy_entry_to_user,
1900 &pos, &size, counters, &i);
/*
 * compat_get_entries - IPT_SO_GET_ENTRIES for 32-bit callers: validate
 * the request size against the table's *compat* size, then copy the
 * ruleset out in compat layout.  Runs under the xt compat lock because
 * the translation uses the per-AF compat offset table.
 */
1907 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1910 struct compat_ipt_get_entries get;
1913 if (*len < sizeof(get)) {
1914 duprintf("compat_get_entries: %u < %u\n",
1915 *len, (unsigned int)sizeof(get));
1919 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1922 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1923 duprintf("compat_get_entries: %u != %u\n", *len,
1924 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1929 xt_compat_lock(AF_INET);
1930 t = xt_find_table_lock(AF_INET, get.name);
1931 if (t && !IS_ERR(t)) {
1932 struct xt_table_info *private = t->private;
1933 struct xt_table_info info;
1934 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info computes the table's size as seen by a 32-bit
 * task; user-supplied get.size must match it exactly. */
1935 ret = compat_table_info(private, &info);
1936 if (!ret && get.size == info.size) {
1937 ret = compat_copy_entries_to_user(private->size,
1938 t, uptr->entrytable);
1940 duprintf("compat_get_entries: I've got %u not %u!\n",
1941 private->size, get.size);
1944 xt_compat_flush_offsets(AF_INET);
1948 ret = t ? PTR_ERR(t) : -ENOENT;
1950 xt_compat_unlock(AF_INET);
/* Forward declaration: the compat get handler falls through to the
 * native one for commands without a compat-specific path. */
1954 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat_do_ipt_get_ctl - getsockopt entry point for 32-bit userspace.
 * INFO and ENTRIES need compat translation; everything else (e.g. the
 * revision queries) is layout-identical and delegates to the native
 * handler.
 */
1957 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1961 if (!capable(CAP_NET_ADMIN))
1965 case IPT_SO_GET_INFO:
1966 ret = get_info(user, len, 1);
1968 case IPT_SO_GET_ENTRIES:
1969 ret = compat_get_entries(user, len);
1972 ret = do_ipt_get_ctl(sk, cmd, user, len);
/*
 * do_ipt_set_ctl - native setsockopt entry point.  Requires
 * CAP_NET_ADMIN; handles table replacement and counter addition.
 */
1979 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1983 if (!capable(CAP_NET_ADMIN))
1987 case IPT_SO_SET_REPLACE:
1988 ret = do_replace(user, len);
1991 case IPT_SO_SET_ADD_COUNTERS:
/* Third argument 0 = native (non-compat) caller. */
1992 ret = do_add_counters(user, len, 0);
1996 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
/*
 * do_ipt_get_ctl - native getsockopt entry point: table info, entry
 * dump, and match/target revision queries (which autoload the
 * extension module by name if necessary).
 */
2004 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2008 if (!capable(CAP_NET_ADMIN))
2012 case IPT_SO_GET_INFO:
2013 ret = get_info(user, len, 0);
2016 case IPT_SO_GET_ENTRIES:
2017 ret = get_entries(user, len);
2020 case IPT_SO_GET_REVISION_MATCH:
2021 case IPT_SO_GET_REVISION_TARGET: {
2022 struct ipt_get_revision rev;
2025 if (*len != sizeof(rev)) {
2029 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Same lookup for both commands; only the target flag differs. */
2034 if (cmd == IPT_SO_GET_REVISION_TARGET)
2039 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2042 "ipt_%s", rev.name);
2047 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
/*
 * ipt_register_table - build a table from its built-in initial ruleset
 * (@repl) and register it with the xt core.  The translated blob is
 * freed again on any failure.
 */
2054 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2057 struct xt_table_info *newinfo;
/* bootstrap is a zeroed placeholder xt_table_info handed to
 * xt_register_table, which swaps in newinfo. */
2058 struct xt_table_info bootstrap
2059 = { 0, 0, 0, { 0 }, { 0 }, { } };
2060 void *loc_cpu_entry;
2062 newinfo = xt_alloc_table_info(repl->size);
2066 /* choose the copy on our node/cpu, but dont care about preemption */
2067 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2068 memcpy(loc_cpu_entry, repl->entries, repl->size);
2070 ret = translate_table(table->name, table->valid_hooks,
2071 newinfo, loc_cpu_entry, repl->size,
2076 xt_free_table_info(newinfo);
2080 ret = xt_register_table(table, &bootstrap, newinfo);
2082 xt_free_table_info(newinfo);
/*
 * ipt_unregister_table - detach the table from the xt core, release the
 * module references held by every entry, and free its storage.
 */
2089 void ipt_unregister_table(struct xt_table *table)
2091 struct xt_table_info *private;
2092 void *loc_cpu_entry;
2094 private = xt_unregister_table(table);
2096 /* Decrease module usage counts and free resources */
2097 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2098 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2099 xt_free_table_info(private);
2102 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* test_type == 0xFF is the userspace wildcard: match any ICMP type. */
2104 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2105 u_int8_t type, u_int8_t code,
2108 return ((test_type == 0xFF) ||
2109 (type == test_type && code >= min_code && code <= max_code))
/*
 * icmp_match - xt match callback for the built-in "icmp" match: pull
 * the ICMP header at @protoff and compare type/code against the rule's
 * range, honouring the IPT_ICMP_INV inversion flag.
 */
2114 icmp_match(const struct sk_buff *skb,
2115 const struct net_device *in,
2116 const struct net_device *out,
2117 const struct xt_match *match,
2118 const void *matchinfo,
2120 unsigned int protoff,
2123 struct icmphdr _icmph, *ic;
2124 const struct ipt_icmp *icmpinfo = matchinfo;
2126 /* Must not be a fragment. */
/* skb_header_pointer copies into _icmph if the header is non-linear;
 * NULL means the packet is too short to carry an ICMP header. */
2130 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2132 /* We've been asked to examine this packet, and we
2133 * can't. Hence, no choice but to drop.
2135 duprintf("Dropping evil ICMP tinygram.\n");
2140 return icmp_type_code_match(icmpinfo->type,
/* !! normalizes the inversion flag to 0/1 for the XOR inside. */
2144 !!(icmpinfo->invflags&IPT_ICMP_INV));
2147 /* Called when user tries to insert an entry of this type. */
2149 icmp_checkentry(const char *tablename,
2151 const struct xt_match *match,
2153 unsigned int hook_mask)
2155 const struct ipt_icmp *icmpinfo = matchinfo;
2157 /* Must specify no unknown invflags */
2158 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2161 /* The built-in targets: standard (NULL) and error. */
/* Standard target: verdict is a plain int; no ->target hook means the
 * verdict is handled inline by the traversal code. */
2162 static struct xt_target ipt_standard_target __read_mostly = {
2163 .name = IPT_STANDARD_TARGET,
2164 .targetsize = sizeof(int),
2166 #ifdef CONFIG_COMPAT
/* 32-bit tasks exchange the verdict as compat_int_t. */
2167 .compatsize = sizeof(compat_int_t),
2168 .compat_from_user = compat_standard_from_user,
2169 .compat_to_user = compat_standard_to_user,
/* Error target: terminates an ERROR entry; payload is the error name. */
2173 static struct xt_target ipt_error_target __read_mostly = {
2174 .name = IPT_ERROR_TARGET,
2175 .target = ipt_error,
2176 .targetsize = IPT_FUNCTION_MAXNAMELEN,
/* sockopt registration: routes the IPT_SO_* get/set ranges (and their
 * 32-bit compat variants) to the handlers above. */
2180 static struct nf_sockopt_ops ipt_sockopts = {
2182 .set_optmin = IPT_BASE_CTL,
2183 .set_optmax = IPT_SO_SET_MAX+1,
2184 .set = do_ipt_set_ctl,
2185 #ifdef CONFIG_COMPAT
2186 .compat_set = compat_do_ipt_set_ctl,
2188 .get_optmin = IPT_BASE_CTL,
2189 .get_optmax = IPT_SO_GET_MAX+1,
2190 .get = do_ipt_get_ctl,
2191 #ifdef CONFIG_COMPAT
2192 .compat_get = compat_do_ipt_get_ctl,
2194 .owner = THIS_MODULE,
/* Built-in "icmp" match, restricted to IPPROTO_ICMP packets. */
2197 static struct xt_match icmp_matchstruct __read_mostly = {
2199 .match = icmp_match,
2200 .matchsize = sizeof(struct ipt_icmp),
2201 .checkentry = icmp_checkentry,
2202 .proto = IPPROTO_ICMP,
/*
 * ip_tables_init - module init: register with the xt core, then the
 * built-in targets/match, then the sockopt interface.  Each failure
 * unwinds everything registered before it (labels elided in this
 * excerpt).
 */
2206 static int __init ip_tables_init(void)
2210 ret = xt_proto_init(AF_INET);
2214 /* Noone else will be downing sem now, so we won't sleep */
2215 ret = xt_register_target(&ipt_standard_target);
2218 ret = xt_register_target(&ipt_error_target);
2221 ret = xt_register_match(&icmp_matchstruct);
2225 /* Register setsockopt */
2226 ret = nf_register_sockopt(&ipt_sockopts);
2230 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, in reverse registration order. */
2234 xt_unregister_match(&icmp_matchstruct);
2236 xt_unregister_target(&ipt_error_target);
2238 xt_unregister_target(&ipt_standard_target);
2240 xt_proto_fini(AF_INET);
/* Module exit: tear down in exact reverse order of ip_tables_init. */
2245 static void __exit ip_tables_fini(void)
2247 nf_unregister_sockopt(&ipt_sockopts);
2249 xt_unregister_match(&icmp_matchstruct);
2250 xt_unregister_target(&ipt_error_target);
2251 xt_unregister_target(&ipt_standard_target);
2253 xt_proto_fini(AF_INET);
/* Public API for iptable_* table modules (filter, nat, mangle, raw). */
2256 EXPORT_SYMBOL(ipt_register_table);
2257 EXPORT_SYMBOL(ipt_unregister_table);
2258 EXPORT_SYMBOL(ipt_do_table);
2259 module_init(ip_tables_init);
2260 module_exit(ip_tables_fini);