/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
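/*
 * Illustrative sketch (not part of the build) of the locking pattern the
 * comment above describes.  Packet-path code takes the table lock as a
 * reader with BHs disabled, so many CPUs can traverse their own rule
 * copies concurrently; user context takes it as a writer, which drains
 * the packet path.  The two function names here are hypothetical.
 */
#if 0
static void packet_path_sketch(struct xt_table *table)
{
	read_lock_bh(&table->lock);	/* many CPUs may hold this at once */
	/* ... traverse this CPU's copy of the rules, bump its counters ... */
	read_unlock_bh(&table->lock);
}

static void user_context_sketch(struct xt_table *table)
{
	write_lock_bh(&table->lock);	/* excludes every packet-path reader */
	/* ... snapshot counters or swap in a new rule blob ... */
	write_unlock_bh(&table->lock);
}
#endif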
/* Returns whether matches rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

/* XOR the raw test result with the rule's inversion flag, so one
   expression handles both "-s x" and "! -s x". */
#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
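/*
 * Illustrative sketch (not part of the build): FWINV() XORs a raw match
 * result with the rule's inversion flag.  With IPT_INV_SRCIP clear,
 * FWINV(mismatch, IPT_INV_SRCIP) is just `mismatch'; with the flag set,
 * a mismatch becomes a match and vice versa, which is how "! -s x" is
 * represented.  The helper name here is hypothetical.
 */
#if 0
static int src_matches_sketch(const struct iphdr *ip,
			      const struct ipt_ip *ipinfo)
{
	/* True when the masked source address agrees with the rule,
	 * honouring a possible "!" inversion. */
	return !FWINV((ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		      IPT_INV_SRCIP);
}
#endif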
static inline int
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return 0;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return 0;
	}

	return 1;
}
static unsigned int
ipt_error(struct sk_buff **pskb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
static inline
int do_match(struct ipt_entry_match *m,
	     const struct sk_buff *skb,
	     const struct net_device *in,
	     const struct net_device *out,
	     int offset,
	     int *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, ip_hdrlen(skb), hotdrop))
		return 1;
	else
		return 0;
}
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = ip_hdr(*pskb);
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (i.e. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);
#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = ip_hdr(*pskb);
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
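/*
 * Illustrative sketch (not part of the build): how standard targets
 * encode verdicts.  A negative verdict v other than IPT_RETURN packs a
 * netfilter verdict as -(verdict + 1), which ipt_do_table() above
 * decodes with (unsigned)(-v) - 1; a non-negative verdict is a byte
 * offset to jump to within the table blob.  The helper name is
 * hypothetical.
 */
#if 0
static unsigned int decode_standard_verdict_sketch(int v)
{
	/* -NF_DROP - 1   == -1 decodes back to NF_DROP (0);
	 * -NF_ACCEPT - 1 == -2 decodes back to NF_ACCEPT (1). */
	return (unsigned)(-v) - 1;
}
#endif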
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jumps. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This is a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}
static inline int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
		return -EINVAL;

	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
static inline int check_match(struct ipt_entry_match *m, const char *name,
				const struct ipt_ip *ip, unsigned int hookmask,
				unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = m->u.kernel.match;
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (!ret && m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
	}
	if (!ret)
		(*i)++;
	return ret;
}
static inline int
find_check_match(struct ipt_entry_match *m,
		 const char *name,
		 const struct ipt_ip *ip,
		 unsigned int hookmask,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, name, ip, hookmask, i);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static inline int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;

	t = ipt_get_target(e);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (!ret && t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target,
					       t->data, e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}
static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
				e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 * We don't care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}
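/*
 * Illustrative sketch (not part of the build) of the snapshot strategy
 * above, flattened to one counter per CPU: SET_COUNTER() seeds each slot
 * from the current CPU's copy, then ADD_COUNTER() folds in every other
 * CPU, saving the memset an add-only loop would need.  The function and
 * its parameters are hypothetical.
 */
#if 0
static void counters_snapshot_sketch(struct xt_counters total[],
				     const struct xt_counters percpu[],
				     unsigned int cpus, unsigned int idx)
{
	unsigned int cpu;

	SET_COUNTER(total[idx], percpu[0].bcnt, percpu[0].pcnt);
	for (cpu = 1; cpu < cpus; cpu++)
		ADD_COUNTER(total[idx], percpu[cpu].bcnt, percpu[cpu].pcnt);
}
#endif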
static inline struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for(tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}
static short compat_calc_jump(unsigned int offset)
{
	struct compat_delta *tmp;
	short delta;

	for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
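/*
 * Illustrative sketch (not part of the build): why compat_calc_jump()
 * sums the deltas of all entries below a given offset.  Each native
 * entry shrinks by `delta' bytes in the 32-bit compat layout, so a jump
 * target must be pulled back by the total shrinkage of everything in
 * front of it.  The offsets and deltas here are made-up numbers.
 */
#if 0
static void compat_delta_sketch(void)
{
	compat_add_offset(0, 8);	/* entry at offset 0 shrinks by 8   */
	compat_add_offset(200, 8);	/* entry at offset 200 shrinks by 8 */
	/* A jump to offset 300 crosses both entries, so its compat
	 * equivalent is 300 - compat_calc_jump(300) == 300 - 16 == 284. */
}
#endif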
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += compat_calc_jump(v);
	memcpy(dst, &v, sizeof(v));
}
static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= compat_calc_jump(cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
static inline int
compat_calc_match(struct ipt_entry_match *m, int * size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}
static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
				void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry, newinfo);
}
#endif
static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			 (unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %d\n", *len,
			 (unsigned int)sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(const char *name, unsigned int valid_hooks,
		struct xt_table_info *newinfo, unsigned int num_counters,
		void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_IP_NUMHOOKS];
	u32			underflow[NF_IP_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
		void __user **dstptr, compat_uint_t *size)
{
	return xt_compat_match_to_user(m, dstptr, size);
}
static int compat_copy_entry_to_user(struct ipt_entry *e,
		void __user **dstptr, compat_uint_t *size)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;
	return 0;
out:
	return ret;
}
static inline int
compat_find_calc_match(struct ipt_entry_match *m,
		const char *name,
		const struct ipt_ip *ip,
		unsigned int hookmask,
		int *size, int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
static inline int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	module_put(m->u.kernel.match->me);
	return 0;
}
static inline int
compat_release_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned int *size,
			   unsigned char *base,
			   unsigned char *limit,
			   unsigned int *hook_entries,
			   unsigned int *underflows,
			   unsigned int *i,
			   const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	ret = check_entry(e, name);
	if (ret)
		return ret;

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = IPT_MATCH_ITERATE(e, compat_find_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask)
{
	xt_compat_match_from_user(m, dstptr, size);
	return 0;
}
static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
	unsigned int *size, const char *name,
	struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
				name, &de->ip, de->comefrom);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
static inline int compat_check_entry(struct ipt_entry *e, const char *name,
				     unsigned int *i)
{
	int j, ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static int
translate_compat_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info **pinfo,
		void **pentry0,
		unsigned int total_size,
		unsigned int number,
		unsigned int *hook_entries,
		unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		j -= i;
		IPT_ENTRY_ITERATE_CONTINUE(entry1, newinfo->size, i,
					   compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	return ret;
out_unlock:
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	goto out;
}
static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int compat_copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table, void __user *userptr)
{
	unsigned int off, num;
	struct compat_ipt_entry e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user, &pos, &size);
	if (ret)
		goto free_counters;

	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match m;
		struct ipt_entry_target t;

		ret = -EFAULT;
		if (copy_from_user(&e, userptr + off,
				   sizeof(struct compat_ipt_entry)))
			goto free_counters;
		if (copy_to_user(userptr + off +
				 offsetof(struct compat_ipt_entry, counters),
				 &counters[num], sizeof(counters[num])))
			goto free_counters;

		for (i = sizeof(struct compat_ipt_entry);
		     i < e.target_offset; i += m.u.match_size) {
			if (copy_from_user(&m, userptr + off + i,
					   sizeof(struct ipt_entry_match)))
				goto free_counters;
			if (copy_to_user(userptr + off + i +
				offsetof(struct ipt_entry_match, u.user.name),
				m.u.kernel.match->name,
				strlen(m.u.kernel.match->name) + 1))
				goto free_counters;
		}

		if (copy_from_user(&t, userptr + off + e.target_offset,
				   sizeof(struct ipt_entry_target)))
			goto free_counters;
		if (copy_to_user(userptr + off + e.target_offset +
				offsetof(struct ipt_entry_target, u.user.name),
				t.u.kernel.target->name,
				strlen(t.u.kernel.target->name) + 1))
			goto free_counters;
	}
	ret = 0;
free_counters:
	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n",
			 private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but don't care about preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
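/*
 * Illustrative sketch (not part of the build): test_type 0xFF acts as a
 * wildcard, so "-p icmp" with no --icmp-type matches every type/code;
 * otherwise both the type and the inclusive code range must agree, and
 * `invert' flips the final result.  The function below is hypothetical.
 */
#if 0
static void icmp_match_sketch(void)
{
	/* echo-request (type 8, code 0) against "--icmp-type 8": match */
	BUG_ON(!icmp_type_code_match(8, 0, 0xFF, 8, 0, 0));
	/* same packet against "! --icmp-type 8": no match */
	BUG_ON(icmp_type_code_match(8, 0, 0xFF, 8, 0, 1));
	/* wildcard rule matches any type/code */
	BUG_ON(!icmp_type_code_match(0xFF, 0, 0xFF, 3, 1, 0));
}
#endif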
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
	   const void *info,
	   const struct xt_match *match,
	   void *matchinfo,
	   unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
};
static struct xt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);