2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
14 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
17 #include <linux/cache.h>
18 #include <linux/capability.h>
19 #include <linux/skbuff.h>
20 #include <linux/kmod.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netdevice.h>
23 #include <linux/module.h>
24 #include <linux/icmp.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
/* Debug helpers: dprintf()/duprintf() expand to printk() when the
 * corresponding DEBUG_IP_FIREWALL* switch above is defined, and to
 * nothing otherwise.
 * NOTE(review): original line numbers are non-contiguous here — the
 * matching #else/#endif lines appear to have been lost in extraction. */
44 #ifdef DEBUG_IP_FIREWALL
45 #define dprintf(format, args...) printk(format , ## args)
47 #define dprintf(format, args...)
50 #ifdef DEBUG_IP_FIREWALL_USER
51 #define duprintf(format, args...) printk(format , ## args)
53 #define duprintf(format, args...)
/* IP_NF_ASSERT(x): logs function/file/line via printk() when
 * CONFIG_NETFILTER_DEBUG is set; compiled out otherwise. */
56 #ifdef CONFIG_NETFILTER_DEBUG
57 #define IP_NF_ASSERT(x) \
60 printk("IP_NF_ASSERT: %s:%s:%u\n", \
61 __FUNCTION__, __FILE__, __LINE__); \
64 #define IP_NF_ASSERT(x)
68 /* All the better to debug you with... */
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
/* ip_packet_match(): core per-rule IPv4 matcher.  Compares the packet's
 * IP header against one rule's struct ipt_ip: masked src/dst address,
 * in/out interface name, protocol, and fragment flag — each test
 * honouring the rule's IPT_INV_* inversion flags.
 * NOTE(review): extraction dropped lines in this body (non-contiguous
 * embedded numbering); the return statements are among the missing. */
82 /* Returns whether matches rule or not. */
84 ip_packet_match(const struct iphdr *ip,
87 const struct ipt_ip *ipinfo,
/* FWINV(b, f): result of test `b' XORed with whether invert flag `f'
 * is set — implements iptables' "!" negation in one expression. */
93 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
/* Masked source/destination address comparison. */
95 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
97 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
99 dprintf("Source or dest mismatch.\n");
101 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
103 NIPQUAD(ipinfo->smsk.s_addr),
104 NIPQUAD(ipinfo->src.s_addr),
105 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
108 NIPQUAD(ipinfo->dmsk.s_addr),
109 NIPQUAD(ipinfo->dst.s_addr),
110 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
/* Interface names compared unsigned-long-at-a-time under a per-byte
 * mask (iniface_mask); relies on IFNAMSIZ being long-aligned. */
114 /* Look for ifname matches; this should unroll nicely. */
115 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
116 ret |= (((const unsigned long *)indev)[i]
117 ^ ((const unsigned long *)ipinfo->iniface)[i])
118 & ((const unsigned long *)ipinfo->iniface_mask)[i];
121 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
122 dprintf("VIA in mismatch (%s vs %s).%s\n",
123 indev, ipinfo->iniface,
124 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
/* Same word-at-a-time comparison for the output device. */
128 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
129 ret |= (((const unsigned long *)outdev)[i]
130 ^ ((const unsigned long *)ipinfo->outiface)[i])
131 & ((const unsigned long *)ipinfo->outiface_mask)[i];
134 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
135 dprintf("VIA out mismatch (%s vs %s).%s\n",
136 outdev, ipinfo->outiface,
137 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
141 /* Check specific protocol */
143 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
144 dprintf("Packet protocol %hi does not match %hi.%s\n",
145 ip->protocol, ipinfo->proto,
146 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
150 /* If we have a fragment rule but the packet is not a fragment
151 * then we return zero */
152 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
153 dprintf("Fragment rule but not fragment.%s\n",
154 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
/* ip_checkentry(): sanity-check the struct ipt_ip a userspace rule
 * supplied — reject any flag bits outside IPT_F_MASK and any invert
 * bits outside IPT_INV_MASK.  (Return statements lost in extraction;
 * presumably returns false on either failure — verify upstream.) */
162 ip_checkentry(const struct ipt_ip *ip)
164 if (ip->flags & ~IPT_F_MASK) {
165 duprintf("Unknown flag bits set: %08X\n",
166 ip->flags & ~IPT_F_MASK);
169 if (ip->invflags & ~IPT_INV_MASK) {
170 duprintf("Unknown invflag bits set: %08X\n",
171 ip->invflags & ~IPT_INV_MASK);
/* ipt_error(): target handler for the built-in ERROR target.  Logs the
 * error name carried in targinfo.  (Tail of the body is missing here;
 * presumably drops the packet — verify upstream.) */
178 ipt_error(struct sk_buff **pskb,
179 const struct net_device *in,
180 const struct net_device *out,
181 unsigned int hooknum,
182 const struct xt_target *target,
183 const void *targinfo)
186 printk("ip_tables: error: `%s'\n", (char *)targinfo);
/* do_match(): per-match trampoline used by IPT_MATCH_ITERATE from the
 * packet path.  Invokes the extension's match() callback with the
 * packet and this rule's match data; a non-match stops iteration. */
192 int do_match(struct ipt_entry_match *m,
193 const struct sk_buff *skb,
194 const struct net_device *in,
195 const struct net_device *out,
199 /* Stop iteration if it doesn't match */
200 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201 offset, skb->nh.iph->ihl*4, hotdrop))
/* Map a byte offset within a rule-table blob to the entry stored there. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)((char *)base + offset);
}
/* ipt_do_table(): the main packet-walking loop.  Starting at the hook's
 * entry point in this CPU's copy of the table, match each rule; on a
 * standard target interpret the verdict (absolute verdict, RETURN pop,
 * or jump/fallthrough), otherwise call the extension target.  Runs
 * under read_lock_bh(&table->lock); counters are per-CPU so only the
 * read lock is needed in softirq context (see comment near top of file).
 * NOTE(review): many lines dropped in extraction — the declarations of
 * ip/datalen/offset/hotdrop/table_base, the loop head, and the final
 * verdict return are among the missing. */
213 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
215 ipt_do_table(struct sk_buff **pskb,
217 const struct net_device *in,
218 const struct net_device *out,
219 struct ipt_table *table)
221 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
226 /* Initializing verdict to NF_DROP keeps gcc happy. */
227 unsigned int verdict = NF_DROP;
228 const char *indev, *outdev;
230 struct ipt_entry *e, *back;
231 struct xt_table_info *private;
234 ip = (*pskb)->nh.iph;
235 datalen = (*pskb)->len - ip->ihl * 4;
236 indev = in ? in->name : nulldevname;
237 outdev = out ? out->name : nulldevname;
238 /* We handle fragments by dealing with the first fragment as
239 * if it was a normal packet. All other fragments are treated
240 * normally, except that they will NEVER match rules that ask
241 * things we don't know, ie. tcp syn flag or ports). If the
242 * rule is also a fragment-specific rule, non-fragments won't
244 offset = ntohs(ip->frag_off) & IP_OFFSET;
246 read_lock_bh(&table->lock);
247 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
248 private = table->private;
/* Each CPU owns a private copy of the entries; pick ours. */
249 table_base = (void *)private->entries[smp_processor_id()];
250 e = get_entry(table_base, private->hook_entry[hook]);
252 /* For return from builtin chain */
253 back = get_entry(table_base, private->underflow[hook]);
258 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
259 struct ipt_entry_target *t;
261 if (IPT_MATCH_ITERATE(e, do_match,
263 offset, &hotdrop) != 0)
266 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
268 t = ipt_get_target(e);
269 IP_NF_ASSERT(t->u.kernel.target);
270 /* Standard target? */
271 if (!t->u.kernel.target->target) {
274 v = ((struct ipt_standard_target *)t)->verdict;
276 /* Pop from stack? */
277 if (v != IPT_RETURN) {
/* Verdicts are stored as -(NF_verdict+1); decode back. */
278 verdict = (unsigned)(-v) - 1;
282 back = get_entry(table_base,
286 if (table_base + v != (void *)e + e->next_offset
287 && !(e->ip.flags & IPT_F_GOTO)) {
288 /* Save old back ptr in next entry */
289 struct ipt_entry *next
290 = (void *)e + e->next_offset;
292 = (void *)back - table_base;
293 /* set back pointer to next entry */
297 e = get_entry(table_base, v);
299 /* Targets which reenter must return
301 #ifdef CONFIG_NETFILTER_DEBUG
302 ((struct ipt_entry *)table_base)->comefrom
305 verdict = t->u.kernel.target->target(pskb,
311 #ifdef CONFIG_NETFILTER_DEBUG
312 if (((struct ipt_entry *)table_base)->comefrom
314 && verdict == IPT_CONTINUE) {
315 printk("Target %s reentered!\n",
316 t->u.kernel.target->name);
319 ((struct ipt_entry *)table_base)->comefrom
322 /* Target might have changed stuff. */
323 ip = (*pskb)->nh.iph;
324 datalen = (*pskb)->len - ip->ihl * 4;
326 if (verdict == IPT_CONTINUE)
327 e = (void *)e + e->next_offset;
335 e = (void *)e + e->next_offset;
339 read_unlock_bh(&table->lock);
341 #ifdef DEBUG_ALLOW_ALL
/* unconditional(): true iff every word of the rule's struct ipt_ip is
 * zero, i.e. the rule matches any packet.  Used by mark_source_chains()
 * to recognise chain-terminating policy rules.  (The return statements
 * were lost in extraction.) */
350 /* All zeroes == unconditional rule. */
352 unconditional(const struct ipt_ip *ip)
356 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
357 if (((__u32 *)ip)[i])
/* mark_source_chains(): walk every rule reachable from each valid hook
 * entry point, recording the originating-hook bitmask in e->comefrom
 * and detecting loops (returns 0 on a loop).  Uses e->counters.pcnt as
 * a scratch back-pointer during the walk (restored to 0 on the way
 * out) so no recursion or extra allocation is needed. */
363 /* Figures out from what hook each rule can be called: returns 0 if
364 there are loops. Puts hook bitmask in comefrom. */
366 mark_source_chains(struct xt_table_info *newinfo,
367 unsigned int valid_hooks, void *entry0)
371 /* No recursion; use packet counter to save back ptrs (reset
372 to 0 as we leave), and comefrom to save source hook bitmask */
373 for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
374 unsigned int pos = newinfo->hook_entry[hook];
376 = (struct ipt_entry *)(entry0 + pos);
378 if (!(valid_hooks & (1 << hook)))
381 /* Set initial back pointer. */
382 e->counters.pcnt = pos;
385 struct ipt_standard_target *t
386 = (void *)ipt_get_target(e);
/* Bit NF_IP_NUMHOOKS marks "currently being visited" —
 * seeing it again means we walked in a circle. */
388 if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
389 printk("iptables: loop hook %u pos %u %08X.\n",
390 hook, pos, e->comefrom);
394 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
396 /* Unconditional return/END. */
397 if (e->target_offset == sizeof(struct ipt_entry)
398 && (strcmp(t->target.u.user.name,
399 IPT_STANDARD_TARGET) == 0)
401 && unconditional(&e->ip)) {
402 unsigned int oldpos, size;
404 /* Return: backtrack through the last
407 e->comefrom ^= (1<<NF_IP_NUMHOOKS);
408 #ifdef DEBUG_IP_FIREWALL_USER
410 & (1 << NF_IP_NUMHOOKS)) {
411 duprintf("Back unset "
/* Pop the saved back-pointer and clear the scratch slot. */
418 pos = e->counters.pcnt;
419 e->counters.pcnt = 0;
421 /* We're at the start. */
425 e = (struct ipt_entry *)
427 } while (oldpos == pos + e->next_offset);
430 size = e->next_offset;
431 e = (struct ipt_entry *)
432 (entry0 + pos + size);
433 e->counters.pcnt = pos;
436 int newpos = t->verdict;
438 if (strcmp(t->target.u.user.name,
439 IPT_STANDARD_TARGET) == 0
441 /* This a jump; chase it. */
442 duprintf("Jump rule %u -> %u\n",
445 /* ... this is a fallthru */
446 newpos = pos + e->next_offset;
448 e = (struct ipt_entry *)
450 e->counters.pcnt = pos;
455 duprintf("Finished chain %u\n", hook);
/* cleanup_match(): IPT_MATCH_ITERATE callback that undoes check_match —
 * calls the extension's destroy() hook (if any) and drops the module
 * reference.  When *i is supplied, only the first *i matches are torn
 * down (used to unwind a partially-checked entry). */
461 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
463 if (i && (*i)-- == 0)
466 if (m->u.kernel.match->destroy)
467 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
468 module_put(m->u.kernel.match->me);
/* standard_check(): validate a standard target's verdict field — a
 * non-negative verdict is a jump offset and must stay inside the entry
 * blob; a negative verdict must not go below -NF_MAX_VERDICT - 1. */
473 standard_check(const struct ipt_entry_target *t,
474 unsigned int max_offset)
476 struct ipt_standard_target *targ = (void *)t;
478 /* Check standard info. */
479 if (targ->verdict >= 0
480 && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
481 duprintf("ipt_standard_check: bad verdict (%i)\n",
485 if (targ->verdict < -NF_MAX_VERDICT - 1) {
486 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
/* check_match(): resolve one rule's match extension by name (loading
 * the ipt_<name> module if needed), pin it with a module reference,
 * then run the generic xt_check_match() size/proto validation and the
 * extension's own checkentry() hook.  On failure the module reference
 * is dropped (the module_put at the bottom is the error path). */
494 check_match(struct ipt_entry_match *m,
496 const struct ipt_ip *ip,
497 unsigned int hookmask,
500 struct ipt_match *match;
503 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
505 "ipt_%s", m->u.user.name);
506 if (IS_ERR(match) || !match) {
507 duprintf("check_match: `%s' not found\n", m->u.user.name);
508 return match ? PTR_ERR(match) : -ENOENT;
510 m->u.kernel.match = match;
512 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
513 name, hookmask, ip->proto,
514 ip->invflags & IPT_INV_PROTO);
518 if (m->u.kernel.match->checkentry
519 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
521 duprintf("ip_tables: check failed for `%s'.\n",
522 m->u.kernel.match->name);
/* Error path: release the reference taken above. */
530 module_put(m->u.kernel.match->me);
/* Forward declaration so check_entry() can recognise the built-in
 * standard target by address. */
534 static struct ipt_target ipt_standard_target;
/* check_entry(): full validation of one translated rule — the ipt_ip
 * header, every match (via check_match), the target lookup/module pin,
 * xt_check_target(), and either standard_check() for the standard
 * target or the extension's checkentry() hook.  On any failure the
 * already-checked matches are unwound via cleanup_match (the `j'
 * counter tracks how many succeeded). */
537 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
540 struct ipt_entry_target *t;
541 struct ipt_target *target;
545 if (!ip_checkentry(&e->ip)) {
546 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
/* Bounds check: the target header must fit before next_offset. */
550 if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
554 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
556 goto cleanup_matches;
558 t = ipt_get_target(e);
560 if (e->target_offset + t->u.target_size > e->next_offset)
561 goto cleanup_matches;
562 target = try_then_request_module(xt_find_target(AF_INET,
565 "ipt_%s", t->u.user.name);
566 if (IS_ERR(target) || !target) {
567 duprintf("check_entry: `%s' not found\n", t->u.user.name);
568 ret = target ? PTR_ERR(target) : -ENOENT;
569 goto cleanup_matches;
571 t->u.kernel.target = target;
573 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
574 name, e->comefrom, e->ip.proto,
575 e->ip.invflags & IPT_INV_PROTO);
579 if (t->u.kernel.target == &ipt_standard_target) {
580 if (!standard_check(t, size)) {
584 } else if (t->u.kernel.target->checkentry
585 && !t->u.kernel.target->checkentry(name, e, target, t->data,
587 duprintf("ip_tables: check failed for `%s'.\n",
588 t->u.kernel.target->name);
/* Error unwind: drop target module ref, then tear down matches. */
596 module_put(t->u.kernel.target->me);
598 IPT_MATCH_ITERATE(e, cleanup_match, &j);
/* check_entry_size_and_hooks(): structural validation of one entry in
 * the userspace blob — alignment, that the entry lies within [base,
 * limit), and minimum size.  Also records which entries sit exactly at
 * the hook-entry and underflow offsets userspace declared, and clears
 * the kernel-owned counters/comefrom fields. */
603 check_entry_size_and_hooks(struct ipt_entry *e,
604 struct xt_table_info *newinfo,
606 unsigned char *limit,
607 const unsigned int *hook_entries,
608 const unsigned int *underflows,
613 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
614 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
615 duprintf("Bad offset %p\n", e);
620 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
621 duprintf("checking: element %p size %u\n",
626 /* Check hooks & underflows */
627 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
628 if ((unsigned char *)e - base == hook_entries[h])
629 newinfo->hook_entry[h] = hook_entries[h];
630 if ((unsigned char *)e - base == underflows[h])
631 newinfo->underflow[h] = underflows[h];
634 /* FIXME: underflows must be unconditional, standard verdicts
635 < 0 (not IPT_RETURN). --RR */
637 /* Clear counters and comefrom */
638 e->counters = ((struct xt_counters) { 0, 0 });
/* cleanup_entry(): release one rule's resources — destroy all matches
 * via cleanup_match, then the target's destroy() hook and its module
 * reference.  With *i set, stops after *i entries (partial unwind). */
646 cleanup_entry(struct ipt_entry *e, unsigned int *i)
648 struct ipt_entry_target *t;
650 if (i && (*i)-- == 0)
653 /* Cleanup all matches */
654 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
655 t = ipt_get_target(e);
656 if (t->u.kernel.target->destroy)
657 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
658 module_put(t->u.kernel.target->me);
/* translate_table(): validate and install a userspace-supplied table
 * blob into newinfo — walk entries for size/hook checks, verify every
 * hook has both an entry point and an underflow, run the full
 * per-entry checks, detect loops via mark_source_chains(), and finally
 * replicate the verified blob into every other CPU's private copy.
 * On check failure, entries validated so far are cleaned up. */
662 /* Checks and translates the user-supplied table segment (held in
665 translate_table(const char *name,
666 unsigned int valid_hooks,
667 struct xt_table_info *newinfo,
671 const unsigned int *hook_entries,
672 const unsigned int *underflows)
677 newinfo->size = size;
678 newinfo->number = number;
680 /* Init all hooks to impossible value. */
681 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
682 newinfo->hook_entry[i] = 0xFFFFFFFF;
683 newinfo->underflow[i] = 0xFFFFFFFF;
686 duprintf("translate_table: size %u\n", newinfo->size);
688 /* Walk through entries, checking offsets. */
689 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
690 check_entry_size_and_hooks,
694 hook_entries, underflows, &i);
699 duprintf("translate_table: %u not %u entries\n",
704 /* Check hooks all assigned */
705 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
706 /* Only hooks which are valid */
707 if (!(valid_hooks & (1 << i)))
709 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
710 duprintf("Invalid hook entry %u %u\n",
714 if (newinfo->underflow[i] == 0xFFFFFFFF) {
715 duprintf("Invalid underflow %u %u\n",
721 /* Finally, each sanity check must pass */
723 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
724 check_entry, name, size, &i);
730 if (!mark_source_chains(newinfo, valid_hooks, entry0))
733 /* And one copy for every other CPU */
734 for_each_possible_cpu(i) {
735 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
736 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Error path: unwind the i entries that passed check_entry. */
741 IPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
/* add_entry_to_counter(): iterator callback — accumulate one entry's
 * byte/packet counters into total[*i] (summing across CPUs). */
747 add_entry_to_counter(const struct ipt_entry *e,
748 struct xt_counters total[],
751 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* set_entry_to_counter(): iterator callback — initialize total[*i]
 * from one entry's counters (used for the first CPU, before the other
 * CPUs' values are added on top). */
758 set_entry_to_counter(const struct ipt_entry *e,
759 struct ipt_counters total[],
762 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* get_counters(): fill the output array with per-rule counters summed
 * over all CPUs — SET from the current CPU's copy first (avoids a
 * prior memset), then ADD every other possible CPU's copy. */
769 get_counters(const struct xt_table_info *t,
770 struct xt_counters counters[])
776 /* Instead of clearing (by a previous call to memset())
777 * the counters and using adds, we set the counters
778 * with data used by 'current' CPU
779 * We dont care about preemption here.
781 curcpu = raw_smp_processor_id();
784 IPT_ENTRY_ITERATE(t->entries[curcpu],
786 set_entry_to_counter,
/* NOTE(review): the loop presumably skips curcpu — the `continue'
 * line is among those lost in extraction; verify upstream. */
790 for_each_possible_cpu(cpu) {
794 IPT_ENTRY_ITERATE(t->entries[cpu],
796 add_entry_to_counter,
/* alloc_counters(): vmalloc a counter array sized for every rule in
 * the table and snapshot the live counters into it under the table's
 * write lock (so the totals are consistent).  Returns the array or an
 * ERR_PTR(-ENOMEM); caller owns (and must vfree) the result. */
802 static inline struct xt_counters * alloc_counters(struct ipt_table *table)
804 unsigned int countersize;
805 struct xt_counters *counters;
806 struct xt_table_info *private = table->private;
808 /* We need atomic snapshot of counters: rest doesn't change
809 (other than comefrom, which userspace doesn't care
811 countersize = sizeof(struct xt_counters) * private->number;
812 counters = vmalloc_node(countersize, numa_node_id());
814 if (counters == NULL)
815 return ERR_PTR(-ENOMEM);
817 /* First, sum counters... */
818 write_lock_bh(&table->lock);
819 get_counters(private, counters);
820 write_unlock_bh(&table->lock);
/* copy_entries_to_user(): export the whole rule blob to userspace —
 * bulk-copy this CPU's private copy, then walk it again patching in
 * (a) the snapshotted counters from alloc_counters() and (b) the
 * canonical kernel names of each match/target (the in-kernel structs
 * hold pointers userspace must not see). */
826 copy_entries_to_user(unsigned int total_size,
827 struct ipt_table *table,
828 void __user *userptr)
830 unsigned int off, num;
832 struct xt_counters *counters;
833 struct xt_table_info *private = table->private;
837 counters = alloc_counters(table);
838 if (IS_ERR(counters))
839 return PTR_ERR(counters);
841 /* choose the copy that is on our node/cpu, ...
842 * This choice is lazy (because current thread is
843 * allowed to migrate to another cpu)
845 loc_cpu_entry = private->entries[raw_smp_processor_id()];
846 /* ... then copy entire thing ... */
847 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
852 /* FIXME: use iterator macros --RR */
853 /* ... then go back and fix counters and names */
854 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
856 struct ipt_entry_match *m;
857 struct ipt_entry_target *t;
859 e = (struct ipt_entry *)(loc_cpu_entry + off);
860 if (copy_to_user(userptr + off
861 + offsetof(struct ipt_entry, counters),
863 sizeof(counters[num])) != 0) {
/* Overwrite each match's name field with the resolved name. */
868 for (i = sizeof(struct ipt_entry);
869 i < e->target_offset;
870 i += m->u.match_size) {
873 if (copy_to_user(userptr + off + i
874 + offsetof(struct ipt_entry_match,
876 m->u.kernel.match->name,
877 strlen(m->u.kernel.match->name)+1)
884 t = ipt_get_target(e);
885 if (copy_to_user(userptr + off + e->target_offset
886 + offsetof(struct ipt_entry_target,
888 t->u.kernel.target->name,
889 strlen(t->u.kernel.target->name)+1) != 0) {
/* Singly-linked list node recording, per entry offset, how much the
 * entry shrinks/grows when converted between native and compat
 * (32-bit userland) layouts.  compat_offsets is the list head.
 * NOTE(review): no visible locking here — presumably serialized by the
 * xt_compat_lock callers take; verify. */
901 struct compat_delta {
902 struct compat_delta *next;
907 static struct compat_delta *compat_offsets = NULL;
/* compat_add_offset(): record one (entry offset -> size delta) pair.
 * New nodes are spliced in right after the list head (order does not
 * matter: compat_calc_jump() sums over the whole list). */
909 static int compat_add_offset(u_int16_t offset, short delta)
911 struct compat_delta *tmp;
913 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
916 tmp->offset = offset;
918 if (compat_offsets) {
919 tmp->next = compat_offsets->next;
920 compat_offsets->next = tmp;
/* First node: becomes the list head. */
922 compat_offsets = tmp;
/* compat_flush_offsets(): free the entire offset-delta list and reset
 * the head, after a compat translation pass is finished. */
928 static void compat_flush_offsets(void)
930 struct compat_delta *tmp, *next;
932 if (compat_offsets) {
933 for(tmp = compat_offsets; tmp; tmp = next) {
937 compat_offsets = NULL;
/* compat_calc_jump(): total size delta accumulated by all entries that
 * precede `offset' — i.e. how far a jump verdict targeting that offset
 * must be adjusted when translating layouts. */
941 static short compat_calc_jump(u_int16_t offset)
943 struct compat_delta *tmp;
946 for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
947 if (tmp->offset < offset)
/* compat_standard_from_user(): convert a standard target's verdict
 * from compat to native layout, adjusting jump offsets by the
 * accumulated delta (negative verdicts are real verdicts, not jumps —
 * the guard line is missing from this extraction). */
952 static void compat_standard_from_user(void *dst, void *src)
954 int v = *(compat_int_t *)src;
957 v += compat_calc_jump(v);
958 memcpy(dst, &v, sizeof(v));
/* compat_standard_to_user(): inverse of compat_standard_from_user —
 * shrink jump offsets by the accumulated delta and copy the verdict
 * out to compat userspace. */
961 static int compat_standard_to_user(void __user *dst, void *src)
963 compat_int_t cv = *(int *)src;
966 cv -= compat_calc_jump(cv);
967 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* compat_calc_match(): iterator callback — add this match's
 * native-vs-compat size difference to the running total. */
971 compat_calc_match(struct ipt_entry_match *m, int * size)
973 *size += xt_compat_match_offset(m->u.kernel.match);
/* compat_calc_entry(): compute how much one entry shrinks in compat
 * layout (sum of match and target offsets), record it in the global
 * delta list, shrink newinfo->size accordingly, and pull back every
 * hook_entry/underflow offset that lies after this entry. */
977 static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
978 void *base, struct xt_table_info *newinfo)
980 struct ipt_entry_target *t;
981 u_int16_t entry_offset;
985 entry_offset = (void *)e - base;
986 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
987 t = ipt_get_target(e);
988 off += xt_compat_target_offset(t->u.kernel.target);
989 newinfo->size -= off;
990 ret = compat_add_offset(entry_offset, off);
994 for (i = 0; i< NF_IP_NUMHOOKS; i++) {
995 if (info->hook_entry[i] && (e < (struct ipt_entry *)
996 (base + info->hook_entry[i])))
997 newinfo->hook_entry[i] -= off;
998 if (info->underflow[i] && (e < (struct ipt_entry *)
999 (base + info->underflow[i])))
1000 newinfo->underflow[i] -= off;
/* compat_table_info(): build a compat-layout description of an
 * existing table — copy size/number/hook offsets, then walk this
 * CPU's entries with compat_calc_entry to shrink them all. */
1005 static int compat_table_info(struct xt_table_info *info,
1006 struct xt_table_info *newinfo)
1008 void *loc_cpu_entry;
1011 if (!newinfo || !info)
1014 memset(newinfo, 0, sizeof(struct xt_table_info));
1015 newinfo->size = info->size;
1016 newinfo->number = info->number;
1017 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1018 newinfo->hook_entry[i] = info->hook_entry[i];
1019 newinfo->underflow[i] = info->underflow[i];
1021 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1022 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1023 compat_calc_entry, info, loc_cpu_entry, newinfo);
/* get_info(): handle the IPT_SO_GET_INFO getsockopt — look up the
 * named table (auto-loading iptable_<name> if absent) and return its
 * hook offsets, entry count and size.  In compat mode the sizes are
 * first converted via compat_table_info() under xt_compat_lock. */
1027 static int get_info(void __user *user, int *len, int compat)
1029 char name[IPT_TABLE_MAXNAMELEN];
1030 struct ipt_table *t;
1033 if (*len != sizeof(struct ipt_getinfo)) {
1034 duprintf("length %u != %u\n", *len,
1035 (unsigned int)sizeof(struct ipt_getinfo));
1039 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL-termination of the userspace-supplied name. */
1042 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1043 #ifdef CONFIG_COMPAT
1045 xt_compat_lock(AF_INET);
1047 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1048 "iptable_%s", name);
1049 if (t && !IS_ERR(t)) {
1050 struct ipt_getinfo info;
1051 struct xt_table_info *private = t->private;
1053 #ifdef CONFIG_COMPAT
1055 struct xt_table_info tmp;
1056 ret = compat_table_info(private, &tmp);
1057 compat_flush_offsets();
1061 info.valid_hooks = t->valid_hooks;
1062 memcpy(info.hook_entry, private->hook_entry,
1063 sizeof(info.hook_entry));
1064 memcpy(info.underflow, private->underflow,
1065 sizeof(info.underflow));
1066 info.num_entries = private->number;
1067 info.size = private->size;
1068 strcpy(info.name, name);
1070 if (copy_to_user(user, &info, *len) != 0)
1078 ret = t ? PTR_ERR(t) : -ENOENT;
1079 #ifdef CONFIG_COMPAT
1081 xt_compat_unlock(AF_INET);
/* get_entries(): handle the IPT_SO_GET_ENTRIES getsockopt — validate
 * the userspace-declared buffer length against the live table's size
 * and, if it matches, dump the rules via copy_entries_to_user(). */
1087 get_entries(struct ipt_get_entries __user *uptr, int *len)
1090 struct ipt_get_entries get;
1091 struct ipt_table *t;
1093 if (*len < sizeof(get)) {
1094 duprintf("get_entries: %u < %d\n", *len,
1095 (unsigned int)sizeof(get));
1098 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1100 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1101 duprintf("get_entries: %u != %u\n", *len,
1102 (unsigned int)(sizeof(struct ipt_get_entries) +
1107 t = xt_find_table_lock(AF_INET, get.name);
1108 if (t && !IS_ERR(t)) {
1109 struct xt_table_info *private = t->private;
1110 duprintf("t->private->number = %u\n",
/* Userspace must have asked for exactly the current size. */
1112 if (get.size == private->size)
1113 ret = copy_entries_to_user(private->size,
1114 t, uptr->entrytable);
1116 duprintf("get_entries: I've got %u not %u!\n",
1124 ret = t ? PTR_ERR(t) : -ENOENT;
/* __do_replace(): common tail of table replacement (native and
 * compat) — look up the target table, swap in the already-translated
 * newinfo via xt_replace_table(), adjust the module refcount when the
 * rule count crosses the initial-entries threshold, snapshot the old
 * table's counters out to userspace, and free the old table. */
1130 __do_replace(const char *name, unsigned int valid_hooks,
1131 struct xt_table_info *newinfo, unsigned int num_counters,
1132 void __user *counters_ptr)
1135 struct ipt_table *t;
1136 struct xt_table_info *oldinfo;
1137 struct xt_counters *counters;
1138 void *loc_cpu_old_entry;
1141 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1147 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1148 "iptable_%s", name);
1149 if (!t || IS_ERR(t)) {
1150 ret = t ? PTR_ERR(t) : -ENOENT;
1151 goto free_newinfo_counters_untrans;
1155 if (valid_hooks != t->valid_hooks) {
1156 duprintf("Valid hook crap: %08X vs %08X\n",
1157 valid_hooks, t->valid_hooks);
1162 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1166 /* Update module usage count based on number of rules */
1167 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1168 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1169 if ((oldinfo->number > oldinfo->initial_entries) ||
1170 (newinfo->number <= oldinfo->initial_entries))
1172 if ((oldinfo->number > oldinfo->initial_entries) &&
1173 (newinfo->number <= oldinfo->initial_entries))
1176 /* Get the old counters. */
1177 get_counters(oldinfo, counters);
1178 /* Decrease module usage counts and free resource */
1179 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1180 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1181 xt_free_table_info(oldinfo);
1182 if (copy_to_user(counters_ptr, counters,
1183 sizeof(struct xt_counters) * num_counters) != 0)
1192 free_newinfo_counters_untrans:
/* do_replace(): handle the IPT_SO_SET_REPLACE setsockopt for native
 * userland — copy in the ipt_replace header and blob (with integer
 * overflow checks on the declared sizes), run translate_table(), then
 * hand off to __do_replace().  On failure the translated entries are
 * cleaned up and the table info freed. */
1199 do_replace(void __user *user, unsigned int len)
1202 struct ipt_replace tmp;
1203 struct xt_table_info *newinfo;
1204 void *loc_cpu_entry;
1206 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1209 /* Hack: Causes ipchains to give correct error msg --RR */
1210 if (len != sizeof(tmp) + tmp.size)
1211 return -ENOPROTOOPT;
1213 /* overflow check */
1214 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1217 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1220 newinfo = xt_alloc_table_info(tmp.size);
1224 /* choose the copy that is our node/cpu */
1225 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1226 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1232 ret = translate_table(tmp.name, tmp.valid_hooks,
1233 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1234 tmp.hook_entry, tmp.underflow);
1238 duprintf("ip_tables: Translated table\n");
1240 ret = __do_replace(tmp.name, tmp.valid_hooks,
1241 newinfo, tmp.num_counters,
1244 goto free_newinfo_untrans;
1247 free_newinfo_untrans:
1248 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1250 xt_free_table_info(newinfo);
/* add_counter_to_entry(): iterator callback for IPT_SO_SET_ADD_COUNTERS
 * — fold the userspace-supplied addme[*i] byte/packet values into this
 * entry's counters (applied to the current CPU's copy only; the
 * per-CPU summing on read makes that sufficient). */
1254 /* We're lazy, and add to the first CPU; overflow works its fey magic
1255 * and everything is OK. */
1257 add_counter_to_entry(struct ipt_entry *e,
1258 const struct xt_counters addme[],
1262 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1264 (long unsigned int)e->counters.pcnt,
1265 (long unsigned int)e->counters.bcnt,
1266 (long unsigned int)addme[*i].pcnt,
1267 (long unsigned int)addme[*i].bcnt);
1270 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* do_add_counters(): handle IPT_SO_SET_ADD_COUNTERS — read either the
 * native or compat counters-info header (selected by `compat'),
 * validate the total length, vmalloc and copy in the counter array,
 * then add each slot into the matching rule under the table's write
 * lock.  Rejects a counter count that differs from the rule count. */
1277 do_add_counters(void __user *user, unsigned int len, int compat)
1280 struct xt_counters_info tmp;
1281 struct xt_counters *paddc;
1282 unsigned int num_counters;
1286 struct ipt_table *t;
1287 struct xt_table_info *private;
1289 void *loc_cpu_entry;
1290 #ifdef CONFIG_COMPAT
1291 struct compat_xt_counters_info compat_tmp;
/* Header size depends on whether the caller is 32-bit compat. */
1295 size = sizeof(struct compat_xt_counters_info);
1300 size = sizeof(struct xt_counters_info);
1303 if (copy_from_user(ptmp, user, size) != 0)
1306 #ifdef CONFIG_COMPAT
1308 num_counters = compat_tmp.num_counters;
1309 name = compat_tmp.name;
1313 num_counters = tmp.num_counters;
1317 if (len != size + num_counters * sizeof(struct xt_counters))
1320 paddc = vmalloc_node(len - size, numa_node_id());
1324 if (copy_from_user(paddc, user + size, len - size) != 0) {
1329 t = xt_find_table_lock(AF_INET, name);
1330 if (!t || IS_ERR(t)) {
1331 ret = t ? PTR_ERR(t) : -ENOENT;
1335 write_lock_bh(&t->lock);
1336 private = t->private;
1337 if (private->number != num_counters) {
1339 goto unlock_up_free;
1343 /* Choose the copy that is on our node */
1344 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1345 IPT_ENTRY_ITERATE(loc_cpu_entry,
1347 add_counter_to_entry,
1351 write_unlock_bh(&t->lock);
/* 32-bit-userland layout of struct ipt_replace: pointer-sized fields
 * become compat_uptr_t and the trailing entries use the packed compat
 * entry layout.  Field order must mirror the native struct. */
1360 #ifdef CONFIG_COMPAT
1361 struct compat_ipt_replace {
1362 char name[IPT_TABLE_MAXNAMELEN];
1366 u32 hook_entry[NF_IP_NUMHOOKS];
1367 u32 underflow[NF_IP_NUMHOOKS];
1369 compat_uptr_t counters; /* struct ipt_counters * */
1370 struct compat_ipt_entry entries[0];
/* compat_copy_match_to_user(): thin IPT_MATCH_ITERATE wrapper that
 * emits one match in compat layout via xt_compat_match_to_user(),
 * advancing *dstptr and shrinking *size as it goes. */
1373 static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1374 void __user **dstptr, compat_uint_t *size)
1376 return xt_compat_match_to_user(m, dstptr, size);
/* compat_copy_entry_to_user(): serialize one native entry into the
 * compat stream — copy the entry header, each match, then the target,
 * and finally patch the entry's target_offset/next_offset to account
 * for the bytes saved by the compat layouts (origsize - *size). */
1379 static int compat_copy_entry_to_user(struct ipt_entry *e,
1380 void __user **dstptr, compat_uint_t *size)
1382 struct ipt_entry_target *t;
1383 struct compat_ipt_entry __user *ce;
1384 u_int16_t target_offset, next_offset;
1385 compat_uint_t origsize;
1390 ce = (struct compat_ipt_entry __user *)*dstptr;
1391 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1394 *dstptr += sizeof(struct compat_ipt_entry);
1395 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1396 target_offset = e->target_offset - (origsize - *size);
1399 t = ipt_get_target(e);
1400 ret = xt_compat_target_to_user(t, dstptr, size);
1404 next_offset = e->next_offset - (origsize - *size);
1405 if (put_user(target_offset, &ce->target_offset))
1407 if (put_user(next_offset, &ce->next_offset))
/* compat_check_calc_match(): compat first pass per match — resolve the
 * extension by name (module-loading if needed), pin it, and accumulate
 * its native-vs-compat size offset into *size. */
1415 compat_check_calc_match(struct ipt_entry_match *m,
1417 const struct ipt_ip *ip,
1418 unsigned int hookmask,
1421 struct ipt_match *match;
1423 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1424 m->u.user.revision),
1425 "ipt_%s", m->u.user.name);
1426 if (IS_ERR(match) || !match) {
1427 duprintf("compat_check_calc_match: `%s' not found\n",
1429 return match ? PTR_ERR(match) : -ENOENT;
1431 m->u.kernel.match = match;
1432 *size += xt_compat_match_offset(match);
/* check_compat_entry_size_and_hooks(): compat counterpart of
 * check_entry_size_and_hooks() plus the first half of check_entry() —
 * validate alignment/bounds/sizes of one compat-layout entry, resolve
 * and pin all matches (accumulating the size delta via
 * compat_check_calc_match) and the target, record the delta with
 * compat_add_offset(), note hook/underflow positions, and clear the
 * kernel-owned counter fields.  Failures unwind via cleanup_match. */
1439 check_compat_entry_size_and_hooks(struct ipt_entry *e,
1440 struct xt_table_info *newinfo,
1442 unsigned char *base,
1443 unsigned char *limit,
1444 unsigned int *hook_entries,
1445 unsigned int *underflows,
1449 struct ipt_entry_target *t;
1450 struct ipt_target *target;
1451 u_int16_t entry_offset;
1454 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1455 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1456 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1457 duprintf("Bad offset %p, limit = %p\n", e, limit);
1461 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1462 sizeof(struct compat_xt_entry_target)) {
1463 duprintf("checking: element %p size %u\n",
1468 if (!ip_checkentry(&e->ip)) {
1469 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
1473 if (e->target_offset + sizeof(struct compat_xt_entry_target) >
1478 entry_offset = (void *)e - (void *)base;
1480 ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1481 e->comefrom, &off, &j);
1483 goto cleanup_matches;
1485 t = ipt_get_target(e);
1487 if (e->target_offset + t->u.target_size > e->next_offset)
1488 goto cleanup_matches;
1489 target = try_then_request_module(xt_find_target(AF_INET,
1491 t->u.user.revision),
1492 "ipt_%s", t->u.user.name);
1493 if (IS_ERR(target) || !target) {
1494 duprintf("check_entry: `%s' not found\n", t->u.user.name);
1495 ret = target ? PTR_ERR(target) : -ENOENT;
1496 goto cleanup_matches;
1498 t->u.kernel.target = target;
1500 off += xt_compat_target_offset(target);
1502 ret = compat_add_offset(entry_offset, off);
1506 /* Check hooks & underflows */
1507 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1508 if ((unsigned char *)e - base == hook_entries[h])
1509 newinfo->hook_entry[h] = hook_entries[h];
1510 if ((unsigned char *)e - base == underflows[h])
1511 newinfo->underflow[h] = underflows[h];
1514 /* Clear counters and comefrom */
1515 e->counters = ((struct ipt_counters) { 0, 0 });
/* Error unwind: drop target ref, then the j pinned matches. */
1522 module_put(t->u.kernel.target->me);
1524 IPT_MATCH_ITERATE(e, cleanup_match, &j);
/* compat_copy_match_from_user(): expand one compat-layout match into
 * native layout at *dstptr (via xt_compat_match_from_user), then run
 * xt_check_match() and the extension's checkentry() hook on the
 * expanded copy.  On failure the match's module ref is dropped. */
1528 static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1529 void **dstptr, compat_uint_t *size, const char *name,
1530 const struct ipt_ip *ip, unsigned int hookmask, int *i)
1532 struct ipt_entry_match *dm;
1533 struct ipt_match *match;
1536 dm = (struct ipt_entry_match *)*dstptr;
1537 match = m->u.kernel.match;
1538 xt_compat_match_from_user(m, dstptr, size);
1540 ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
1541 name, hookmask, ip->proto,
1542 ip->invflags & IPT_INV_PROTO);
1546 if (m->u.kernel.match->checkentry
1547 && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
1549 duprintf("ip_tables: check failed for `%s'.\n",
1550 m->u.kernel.match->name);
/* Error path: release the reference pinned in the first pass. */
1558 module_put(m->u.kernel.match->me);
/*
 * compat_copy_entry_from_user - translate one whole compat rule (entry +
 * matches + target) into the native table at *dstptr, fixing up the entry's
 * internal offsets and the table's hook/underflow offsets to account for
 * the size difference between compat and native layouts.
 *
 * NOTE(review): extraction gaps — error labels, returns and some braces
 * from the original are not visible here.
 */
1562 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1563 unsigned int *size, const char *name,
1564 struct xt_table_info *newinfo, unsigned char *base)
1566 struct ipt_entry_target *t;
1567 struct ipt_target *target;
1568 struct ipt_entry *de;
1569 unsigned int origsize;
/* de = native-layout destination entry being built */
1574 de = (struct ipt_entry *)*dstptr;
1575 memcpy(de, e, sizeof(struct ipt_entry));
/* source advances by the (smaller) compat entry header size */
1578 *dstptr += sizeof(struct compat_ipt_entry);
1579 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1580 name, &de->ip, de->comefrom, &j);
1582 goto cleanup_matches;
/* (origsize - *size) is the cumulative shrink so far; rebase offsets by it */
1583 de->target_offset = e->target_offset - (origsize - *size);
1584 t = ipt_get_target(e);
1585 target = t->u.kernel.target;
1586 xt_compat_target_from_user(t, dstptr, size);
1588 de->next_offset = e->next_offset - (origsize - *size);
/* shift any hook entry/underflow offsets that lie beyond this entry */
1589 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1590 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1591 newinfo->hook_entry[h] -= origsize - *size;
1592 if ((unsigned char *)de - base < newinfo->underflow[h])
1593 newinfo->underflow[h] -= origsize - *size;
/* re-validate the target against the translated native entry */
1596 t = ipt_get_target(de);
1597 target = t->u.kernel.target;
1598 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
1599 name, e->comefrom, e->ip.proto,
1600 e->ip.invflags & IPT_INV_PROTO);
/* standard target gets its verdict checked; others run their own hook */
1605 if (t->u.kernel.target == &ipt_standard_target) {
1606 if (!standard_check(t, *size))
1608 } else if (t->u.kernel.target->checkentry
1609 && !t->u.kernel.target->checkentry(name, de, target,
1610 t->data, de->comefrom)) {
1611 duprintf("ip_tables: compat: check failed for `%s'.\n",
1612 t->u.kernel.target->name);
/* error path: release target module ref, then unwind translated matches */
1619 module_put(t->u.kernel.target->me);
1621 IPT_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * translate_compat_table - convert a complete ruleset supplied by a 32-bit
 * userland into a freshly allocated native xt_table_info.
 *
 * Two passes over the compat image: (1) check sizes/offsets and compute
 * the native size, (2) copy entries into the native table.  Runs under
 * xt_compat_lock(AF_INET) because the offset bookkeeping
 * (compat_add_offset/compat_flush_offsets) uses shared state.
 *
 * NOTE(review): extraction gaps — some declarations, returns and error
 * labels from the original are not visible here.
 */
1626 translate_compat_table(const char *name,
1627 unsigned int valid_hooks,
1628 struct xt_table_info **pinfo,
1630 unsigned int total_size,
1631 unsigned int number,
1632 unsigned int *hook_entries,
1633 unsigned int *underflows)
1636 struct xt_table_info *newinfo, *info;
1637 void *pos, *entry0, *entry1;
1644 info->number = number;
1646 /* Init all hooks to impossible value. */
1647 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1648 info->hook_entry[i] = 0xFFFFFFFF;
1649 info->underflow[i] = 0xFFFFFFFF;
1652 duprintf("translate_compat_table: size %u\n", info->size);
1654 xt_compat_lock(AF_INET);
1655 /* Walk through entries, checking offsets. */
1656 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1657 check_compat_entry_size_and_hooks,
1658 info, &size, entry0,
1659 entry0 + total_size,
1660 hook_entries, underflows, &i, name);
1666 duprintf("translate_compat_table: %u not %u entries\n",
1671 /* Check hooks all assigned */
1672 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1673 /* Only hooks which are valid */
1674 if (!(valid_hooks & (1 << i)))
1676 if (info->hook_entry[i] == 0xFFFFFFFF) {
1677 duprintf("Invalid hook entry %u %u\n",
1678 i, hook_entries[i]);
1681 if (info->underflow[i] == 0xFFFFFFFF) {
1682 duprintf("Invalid underflow %u %u\n",
/* second table sized for the (larger) native layout computed in pass 1 */
1689 newinfo = xt_alloc_table_info(size);
1693 newinfo->number = number;
1694 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1695 newinfo->hook_entry[i] = info->hook_entry[i];
1696 newinfo->underflow[i] = info->underflow[i];
/* translate into this CPU's copy first, replicate to others below */
1698 entry1 = newinfo->entries[raw_smp_processor_id()];
1701 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1702 compat_copy_entry_from_user, &pos, &size,
1703 name, newinfo, entry1);
1704 compat_flush_offsets();
1705 xt_compat_unlock(AF_INET);
/* verify chain reachability / no loops on the translated table */
1710 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1713 /* And one copy for every other CPU */
1714 for_each_possible_cpu(i)
1715 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1716 memcpy(newinfo->entries[i], entry1, newinfo->size);
1720 xt_free_table_info(info);
/* error path: free the partially built native table too */
1724 xt_free_table_info(newinfo);
1728 xt_compat_unlock(AF_INET);
/*
 * compat_do_replace - IPT_SO_SET_REPLACE handler for 32-bit userland:
 * copy in the compat replace header + ruleset, translate it to native
 * layout, and swap it in via __do_replace().
 *
 * NOTE(review): extraction gaps — returns and some labels not visible.
 */
1733 compat_do_replace(void __user *user, unsigned int len)
1736 struct compat_ipt_replace tmp;
1737 struct xt_table_info *newinfo;
1738 void *loc_cpu_entry;
1740 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1743 /* Hack: Causes ipchains to give correct error msg --RR */
1744 if (len != sizeof(tmp) + tmp.size)
1745 return -ENOPROTOOPT;
1747 /* overflow check */
/* reject sizes that would overflow the per-CPU allocation arithmetic */
1748 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1751 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1754 newinfo = xt_alloc_table_info(tmp.size);
1758 /* choose the copy that is our node/cpu */
1759 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1760 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1766 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1767 &newinfo, &loc_cpu_entry, tmp.size,
1768 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1772 duprintf("compat_do_replace: Translated table\n");
1774 ret = __do_replace(tmp.name, tmp.valid_hooks,
1775 newinfo, tmp.num_counters,
1776 compat_ptr(tmp.counters));
1778 goto free_newinfo_untrans;
/* swap-in failed: release the translated-but-unused table */
1781 free_newinfo_untrans:
1782 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1784 xt_free_table_info(newinfo);
/*
 * compat_do_ipt_set_ctl - setsockopt dispatcher for 32-bit userland.
 * Requires CAP_NET_ADMIN; routes REPLACE to the compat translator and
 * ADD_COUNTERS to do_add_counters() with compat=1.
 */
1789 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1794 if (!capable(CAP_NET_ADMIN))
1798 case IPT_SO_SET_REPLACE:
1799 ret = compat_do_replace(user, len);
1802 case IPT_SO_SET_ADD_COUNTERS:
/* final arg 1 == compat caller */
1803 ret = do_add_counters(user, len, 1);
1807 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
/*
 * Wire format for IPT_SO_GET_ENTRIES from 32-bit userland: table name
 * followed by a variable-length array of compat-layout entries
 * (zero-length array idiom predating C99 flexible array members).
 */
1814 struct compat_ipt_get_entries
1816 char name[IPT_TABLE_MAXNAMELEN];
1818 struct compat_ipt_entry entrytable[0];
/*
 * compat_copy_entries_to_user - dump the live ruleset to a 32-bit caller.
 * First converts each entry to compat layout into userspace, then walks
 * the copied image again to patch in counters and to replace kernel
 * match/target pointers with their user-visible names.
 *
 * NOTE(review): extraction gaps — error handling and returns not fully
 * visible here.
 */
1821 static int compat_copy_entries_to_user(unsigned int total_size,
1822 struct ipt_table *table, void __user *userptr)
1824 unsigned int off, num;
1825 struct compat_ipt_entry e;
1826 struct xt_counters *counters;
1827 struct xt_table_info *private = table->private;
1831 void *loc_cpu_entry;
/* snapshot counters summed over all CPUs */
1833 counters = alloc_counters(table);
1834 if (IS_ERR(counters))
1835 return PTR_ERR(counters);
1837 /* choose the copy that is on our node/cpu, ...
1838 * This choice is lazy (because current thread is
1839 * allowed to migrate to another cpu)
1841 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1844 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1845 compat_copy_entry_to_user, &pos, &size);
1849 /* ... then go back and fix counters and names */
1850 for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1852 struct ipt_entry_match m;
1853 struct ipt_entry_target t;
/* read back the entry header we just wrote, to learn its offsets */
1856 if (copy_from_user(&e, userptr + off,
1857 sizeof(struct compat_ipt_entry)))
1859 if (copy_to_user(userptr + off +
1860 offsetof(struct compat_ipt_entry, counters),
1861 &counters[num], sizeof(counters[num])))
/* overwrite each match's kernel pointer with its NUL-terminated name */
1864 for (i = sizeof(struct compat_ipt_entry);
1865 i < e.target_offset; i += m.u.match_size) {
1866 if (copy_from_user(&m, userptr + off + i,
1867 sizeof(struct ipt_entry_match)))
1869 if (copy_to_user(userptr + off + i +
1870 offsetof(struct ipt_entry_match, u.user.name),
1871 m.u.kernel.match->name,
1872 strlen(m.u.kernel.match->name) + 1))
/* same fixup for the target */
1876 if (copy_from_user(&t, userptr + off + e.target_offset,
1877 sizeof(struct ipt_entry_target)))
1879 if (copy_to_user(userptr + off + e.target_offset +
1880 offsetof(struct ipt_entry_target, u.user.name),
1881 t.u.kernel.target->name,
1882 strlen(t.u.kernel.target->name) + 1))
/*
 * compat_get_entries - IPT_SO_GET_ENTRIES handler for 32-bit userland:
 * validate the requested length against the table's compat size, then
 * dump the entries via compat_copy_entries_to_user().
 *
 * NOTE(review): extraction gaps — some returns/cleanup lines not visible.
 */
1892 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1895 struct compat_ipt_get_entries get;
1896 struct ipt_table *t;
1899 if (*len < sizeof(get)) {
1900 duprintf("compat_get_entries: %u < %u\n",
1901 *len, (unsigned int)sizeof(get));
1905 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1908 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1909 duprintf("compat_get_entries: %u != %u\n", *len,
1910 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
/* compat lock covers the offset bookkeeping used during the dump */
1915 xt_compat_lock(AF_INET);
1916 t = xt_find_table_lock(AF_INET, get.name);
1917 if (t && !IS_ERR(t)) {
1918 struct xt_table_info *private = t->private;
1919 struct xt_table_info info;
1920 duprintf("t->private->number = %u\n",
/* compute the compat-layout size and require the caller sized for it */
1922 ret = compat_table_info(private, &info);
1923 if (!ret && get.size == info.size) {
1924 ret = compat_copy_entries_to_user(private->size,
1925 t, uptr->entrytable);
1927 duprintf("compat_get_entries: I've got %u not %u!\n",
1932 compat_flush_offsets();
1936 ret = t ? PTR_ERR(t) : -ENOENT;
1938 xt_compat_unlock(AF_INET);
/* forward declaration: the default case below falls through to the
 * native getsockopt handler defined later in this file */
1942 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
/*
 * compat_do_ipt_get_ctl - getsockopt dispatcher for 32-bit userland.
 * GET_INFO and GET_ENTRIES get compat handling; everything else is
 * delegated to the native do_ipt_get_ctl().
 */
1945 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1949 if (!capable(CAP_NET_ADMIN))
1953 case IPT_SO_GET_INFO:
/* final arg 1 == compat caller */
1954 ret = get_info(user, len, 1);
1956 case IPT_SO_GET_ENTRIES:
1957 ret = compat_get_entries(user, len);
1960 ret = do_ipt_get_ctl(sk, cmd, user, len);
/*
 * do_ipt_set_ctl - native setsockopt dispatcher (CAP_NET_ADMIN required):
 * REPLACE swaps in a whole new ruleset, ADD_COUNTERS adds to the live one.
 */
1967 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1971 if (!capable(CAP_NET_ADMIN))
1975 case IPT_SO_SET_REPLACE:
1976 ret = do_replace(user, len);
1979 case IPT_SO_SET_ADD_COUNTERS:
/* final arg 0 == native (non-compat) caller */
1980 ret = do_add_counters(user, len, 0);
1984 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
/*
 * do_ipt_get_ctl - native getsockopt dispatcher: table info, entry dump,
 * and match/target revision queries (which may trigger module autoload).
 *
 * NOTE(review): extraction gaps — some returns/braces not visible.
 */
1992 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1996 if (!capable(CAP_NET_ADMIN))
2000 case IPT_SO_GET_INFO:
2001 ret = get_info(user, len, 0);
2004 case IPT_SO_GET_ENTRIES:
2005 ret = get_entries(user, len);
2008 case IPT_SO_GET_REVISION_MATCH:
2009 case IPT_SO_GET_REVISION_TARGET: {
2010 struct ipt_get_revision rev;
2013 if (*len != sizeof(rev)) {
2017 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2022 if (cmd == IPT_SO_GET_REVISION_TARGET)
/* not found in-kernel: request_module("ipt_<name>") and retry */
2027 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2030 "ipt_%s", rev.name);
2035 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
/*
 * ipt_register_table - register a table with an initial ruleset.
 * Copies repl->entries into a new xt_table_info, validates/translates it,
 * then hands it to x_tables.  `bootstrap` is an empty placeholder table
 * swapped out by xt_register_table().
 *
 * NOTE(review): extraction gaps — error-check lines after allocations are
 * not visible here.
 */
2042 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2045 struct xt_table_info *newinfo;
2046 static struct xt_table_info bootstrap
2047 = { 0, 0, 0, { 0 }, { 0 }, { } };
2048 void *loc_cpu_entry;
2050 newinfo = xt_alloc_table_info(repl->size);
2054 /* choose the copy on our node/cpu
2055 * but dont care of preemption
2057 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2058 memcpy(loc_cpu_entry, repl->entries, repl->size);
2060 ret = translate_table(table->name, table->valid_hooks,
2061 newinfo, loc_cpu_entry, repl->size,
/* translation failed: free the table we allocated */
2066 xt_free_table_info(newinfo);
2070 ret = xt_register_table(table, &bootstrap, newinfo);
/* registration failed: newinfo was not swapped in, free it */
2072 xt_free_table_info(newinfo);
/*
 * ipt_unregister_table - detach a table from x_tables and free it:
 * run cleanup_entry over every rule (dropping match/target module refs)
 * then release the per-CPU table storage.
 */
2079 void ipt_unregister_table(struct ipt_table *table)
2081 struct xt_table_info *private;
2082 void *loc_cpu_entry;
2084 private = xt_unregister_table(table);
2086 /* Decrease module usage counts and free resources */
2087 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2088 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2089 xt_free_table_info(private);
2092 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* test_type 0xFF is the wildcard: match any ICMP type/code.
 * NOTE(review): the trailing XOR-with-invert operand is on a line missing
 * from this extract. */
2094 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2095 u_int8_t type, u_int8_t code,
2098 return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
/*
 * icmp_match - packet-match function for the built-in "icmp" match.
 * Reads the ICMP header at protoff and compares type/code against the
 * rule's configured range (with optional inversion).
 *
 * NOTE(review): extraction gaps — the fragment check and the hotdrop
 * handling lines are not fully visible here.
 */
2103 icmp_match(const struct sk_buff *skb,
2104 const struct net_device *in,
2105 const struct net_device *out,
2106 const struct xt_match *match,
2107 const void *matchinfo,
2109 unsigned int protoff,
2112 struct icmphdr _icmph, *ic;
2113 const struct ipt_icmp *icmpinfo = matchinfo;
2115 /* Must not be a fragment. */
/* linearize-or-copy the ICMP header; NULL means truncated packet */
2119 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2121 /* We've been asked to examine this packet, and we
2122 * can't. Hence, no choice but to drop.
2124 duprintf("Dropping evil ICMP tinygram.\n");
2129 return icmp_type_code_match(icmpinfo->type,
2133 !!(icmpinfo->invflags&IPT_ICMP_INV));
2136 /* Called when user tries to insert an entry of this type. */
/* Accept the rule only if no invflags other than IPT_ICMP_INV are set. */
2138 icmp_checkentry(const char *tablename,
2140 const struct xt_match *match,
2142 unsigned int hook_mask)
2144 const struct ipt_icmp *icmpinfo = matchinfo;
2146 /* Must specify no unknown invflags */
2147 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2150 /* The built-in targets: standard (NULL) and error. */
/* Standard target: payload is just a verdict int; compat hooks convert
 * between compat_int_t and int for 32-bit userland. */
2151 static struct ipt_target ipt_standard_target = {
2152 .name = IPT_STANDARD_TARGET,
2153 .targetsize = sizeof(int),
2155 #ifdef CONFIG_COMPAT
2156 .compatsize = sizeof(compat_int_t),
2157 .compat_from_user = compat_standard_from_user,
2158 .compat_to_user = compat_standard_to_user,
/* ERROR target: marks unreachable/invalid positions; payload is an
 * error-name string of IPT_FUNCTION_MAXNAMELEN bytes. */
2162 static struct ipt_target ipt_error_target = {
2163 .name = IPT_ERROR_TARGET,
2164 .target = ipt_error,
2165 .targetsize = IPT_FUNCTION_MAXNAMELEN,
/* sockopt registration: routes the IPT_SO_SET_*/IPT_SO_GET_* ranges to the
 * native handlers, and to the compat_* handlers for 32-bit callers when
 * CONFIG_COMPAT is enabled. */
2169 static struct nf_sockopt_ops ipt_sockopts = {
2171 .set_optmin = IPT_BASE_CTL,
2172 .set_optmax = IPT_SO_SET_MAX+1,
2173 .set = do_ipt_set_ctl,
2174 #ifdef CONFIG_COMPAT
2175 .compat_set = compat_do_ipt_set_ctl,
2177 .get_optmin = IPT_BASE_CTL,
2178 .get_optmax = IPT_SO_GET_MAX+1,
2179 .get = do_ipt_get_ctl,
2180 #ifdef CONFIG_COMPAT
2181 .compat_get = compat_do_ipt_get_ctl,
/* Built-in "icmp" match registration: restricted to IPPROTO_ICMP packets. */
2185 static struct ipt_match icmp_matchstruct = {
2187 .match = icmp_match,
2188 .matchsize = sizeof(struct ipt_icmp),
2189 .proto = IPPROTO_ICMP,
2191 .checkentry = icmp_checkentry,
/*
 * ip_tables_init - module init: register the AF_INET x_tables protocol,
 * the two built-in targets, the icmp match, and the sockopt interface.
 * The tail lines visible below are the error-unwind path (reverse order).
 *
 * NOTE(review): extraction gaps — error labels and gotos between the
 * registration calls are not visible here.
 */
2194 static int __init ip_tables_init(void)
2198 ret = xt_proto_init(AF_INET);
2202 /* Noone else will be downing sem now, so we won't sleep */
2203 ret = xt_register_target(&ipt_standard_target);
2206 ret = xt_register_target(&ipt_error_target);
2209 ret = xt_register_match(&icmp_matchstruct);
2213 /* Register setsockopt */
2214 ret = nf_register_sockopt(&ipt_sockopts);
2218 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
/* error unwind: undo registrations in reverse order */
2222 xt_unregister_match(&icmp_matchstruct);
2224 xt_unregister_target(&ipt_error_target);
2226 xt_unregister_target(&ipt_standard_target);
2228 xt_proto_fini(AF_INET);
/* ip_tables_fini - module exit: unregister everything ip_tables_init
 * registered, in reverse order. */
2233 static void __exit ip_tables_fini(void)
2235 nf_unregister_sockopt(&ipt_sockopts);
2237 xt_unregister_match(&icmp_matchstruct);
2238 xt_unregister_target(&ipt_error_target);
2239 xt_unregister_target(&ipt_standard_target);
2241 xt_proto_fini(AF_INET);
/* Public API for per-table modules (iptable_filter, iptable_nat, ...). */
2244 EXPORT_SYMBOL(ipt_register_table);
2245 EXPORT_SYMBOL(ipt_unregister_table);
2246 EXPORT_SYMBOL(ipt_do_table);
2247 module_init(ip_tables_init);
2248 module_exit(ip_tables_fini);