[NETFILTER]: Rename init functions.
[safe/jmp/linux-2.6] / net / ipv4 / netfilter / ip_tables.c
1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12  *      - increase module usage count as soon as we have rules inside
13  *        a table
 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15  *      - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
16  */
17 #include <linux/config.h>
18 #include <linux/cache.h>
19 #include <linux/capability.h>
20 #include <linux/skbuff.h>
21 #include <linux/kmod.h>
22 #include <linux/vmalloc.h>
23 #include <linux/netdevice.h>
24 #include <linux/module.h>
25 #include <linux/icmp.h>
26 #include <net/ip.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
35
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
39
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf(): packet-path debug output; compiled out entirely unless
 * DEBUG_IP_FIREWALL is defined above. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf(): ruleset load/validation ("userspace interface") debug
 * output; compiled out unless DEBUG_IP_FIREWALL_USER is defined. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* IP_NF_ASSERT(): non-fatal assertion — logs location on failure,
 * never panics; a no-op without CONFIG_NETFILTER_DEBUG. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)                                         \
do {                                                            \
        if (!(x))                                               \
                printk("IP_NF_ASSERT: %s:%s:%u\n",              \
                       __FUNCTION__, __FILE__, __LINE__);       \
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
72
73 /*
74    We keep a set of rules for each CPU, so we can avoid write-locking
75    them in the softirq when updating the counters and therefore
76    only need to read-lock in the softirq; doing a write_lock_bh() in user
77    context stops packets coming through and allows user context to read
78    the counters or update the rules.
79
80    Hence the start of any table is given by get_table() below.  */
81
/* Returns whether matches rule or not. */
/* Compares the IP header and in/out device names of one packet against
 * the IP part of a rule (addresses under netmask, interfaces under the
 * per-byte interface masks, protocol, fragment flag); every comparison
 * can be inverted by the rule's IPT_INV_* flags. */
static inline int
ip_packet_match(const struct iphdr *ip,
                const char *indev,
                const char *outdev,
                const struct ipt_ip *ipinfo,
                int isfrag)
{
        size_t i;
        unsigned long ret;

/* Evaluate 'bool', then flip the result if the rule carries the given
 * IPT_INV_* inversion flag. */
#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))

        /* Source/destination address, each masked by the rule's netmask. */
        if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
                  IPT_INV_SRCIP)
            || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
                     IPT_INV_DSTIP)) {
                dprintf("Source or dest mismatch.\n");

                dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
                        NIPQUAD(ip->saddr),
                        NIPQUAD(ipinfo->smsk.s_addr),
                        NIPQUAD(ipinfo->src.s_addr),
                        ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
                dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
                        NIPQUAD(ip->daddr),
                        NIPQUAD(ipinfo->dmsk.s_addr),
                        NIPQUAD(ipinfo->dst.s_addr),
                        ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
                return 0;
        }

        /* Look for ifname matches; this should unroll nicely. */
        /* Names are fixed IFNAMSIZ buffers compared one unsigned long at
         * a time under iniface_mask; 'ret' accumulates any differing
         * masked bits.  Assumes both buffers are long-aligned — the
         * nulldevname in ipt_do_table is explicitly aligned for this. */
        for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
                ret |= (((const unsigned long *)indev)[i]
                        ^ ((const unsigned long *)ipinfo->iniface)[i])
                        & ((const unsigned long *)ipinfo->iniface_mask)[i];
        }

        if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
                dprintf("VIA in mismatch (%s vs %s).%s\n",
                        indev, ipinfo->iniface,
                        ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
                return 0;
        }

        /* Same word-wise comparison for the output interface. */
        for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
                ret |= (((const unsigned long *)outdev)[i]
                        ^ ((const unsigned long *)ipinfo->outiface)[i])
                        & ((const unsigned long *)ipinfo->outiface_mask)[i];
        }

        if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
                dprintf("VIA out mismatch (%s vs %s).%s\n",
                        outdev, ipinfo->outiface,
                        ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
                return 0;
        }

        /* Check specific protocol */
        if (ipinfo->proto
            && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
                dprintf("Packet protocol %hi does not match %hi.%s\n",
                        ip->protocol, ipinfo->proto,
                        ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
                return 0;
        }

        /* If we have a fragment rule but the packet is not a fragment
         * then we return zero */
        if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
                dprintf("Fragment rule but not fragment.%s\n",
                        ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
                return 0;
        }

        return 1;
}
160
161 static inline int
162 ip_checkentry(const struct ipt_ip *ip)
163 {
164         if (ip->flags & ~IPT_F_MASK) {
165                 duprintf("Unknown flag bits set: %08X\n",
166                          ip->flags & ~IPT_F_MASK);
167                 return 0;
168         }
169         if (ip->invflags & ~IPT_INV_MASK) {
170                 duprintf("Unknown invflag bits set: %08X\n",
171                          ip->invflags & ~IPT_INV_MASK);
172                 return 0;
173         }
174         return 1;
175 }
176
177 static unsigned int
178 ipt_error(struct sk_buff **pskb,
179           const struct net_device *in,
180           const struct net_device *out,
181           unsigned int hooknum,
182           const struct xt_target *target,
183           const void *targinfo,
184           void *userinfo)
185 {
186         if (net_ratelimit())
187                 printk("ip_tables: error: `%s'\n", (char *)targinfo);
188
189         return NF_DROP;
190 }
191
192 static inline
193 int do_match(struct ipt_entry_match *m,
194              const struct sk_buff *skb,
195              const struct net_device *in,
196              const struct net_device *out,
197              int offset,
198              int *hotdrop)
199 {
200         /* Stop iteration if it doesn't match */
201         if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
202                                       offset, skb->nh.iph->ihl*4, hotdrop))
203                 return 1;
204         else
205                 return 0;
206 }
207
/* Translate a byte offset within a table blob into an entry pointer.
 * (Arithmetic on void* is a GCC extension used throughout this file.) */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
        return (struct ipt_entry *)(base + offset);
}
213
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-path entry point: walks the rules of 'table' for the
 * given hook against *pskb, under table->lock (read side, BH off),
 * using this CPU's private copy of the ruleset.  Jumps/returns between
 * chains are implemented with a one-deep-per-entry back-pointer chain
 * stored in each entry's comefrom field. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
             unsigned int hook,
             const struct net_device *in,
             const struct net_device *out,
             struct ipt_table *table,
             void *userdata)
{
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
        u_int16_t offset;
        struct iphdr *ip;
        u_int16_t datalen;
        int hotdrop = 0;
        /* Initializing verdict to NF_DROP keeps gcc happy. */
        unsigned int verdict = NF_DROP;
        const char *indev, *outdev;
        void *table_base;
        struct ipt_entry *e, *back;
        struct xt_table_info *private = table->private;

        /* Initialization */
        ip = (*pskb)->nh.iph;
        datalen = (*pskb)->len - ip->ihl * 4;
        indev = in ? in->name : nulldevname;
        outdev = out ? out->name : nulldevname;
        /* We handle fragments by dealing with the first fragment as
         * if it was a normal packet.  All other fragments are treated
         * normally, except that they will NEVER match rules that ask
         * things we don't know, ie. tcp syn flag or ports).  If the
         * rule is also a fragment-specific rule, non-fragments won't
         * match it. */
        offset = ntohs(ip->frag_off) & IP_OFFSET;

        read_lock_bh(&table->lock);
        IP_NF_ASSERT(table->valid_hooks & (1 << hook));
        /* Every CPU has its own replica of the ruleset; use ours. */
        table_base = (void *)private->entries[smp_processor_id()];
        e = get_entry(table_base, private->hook_entry[hook]);

        /* For return from builtin chain */
        back = get_entry(table_base, private->underflow[hook]);

        do {
                IP_NF_ASSERT(e);
                IP_NF_ASSERT(back);
                if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
                        struct ipt_entry_target *t;

                        /* Non-zero return => some match said "no". */
                        if (IPT_MATCH_ITERATE(e, do_match,
                                              *pskb, in, out,
                                              offset, &hotdrop) != 0)
                                goto no_match;

                        ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

                        t = ipt_get_target(e);
                        IP_NF_ASSERT(t->u.kernel.target);
                        /* Standard target? */
                        if (!t->u.kernel.target->target) {
                                int v;

                                v = ((struct ipt_standard_target *)t)->verdict;
                                if (v < 0) {
                                        /* Pop from stack? */
                                        if (v != IPT_RETURN) {
                                                /* Negative verdicts encode
                                                 * NF_* values as -v - 1. */
                                                verdict = (unsigned)(-v) - 1;
                                                break;
                                        }
                                        /* RETURN: follow the saved back
                                         * pointer chain one level up. */
                                        e = back;
                                        back = get_entry(table_base,
                                                         back->comefrom);
                                        continue;
                                }
                                if (table_base + v != (void *)e + e->next_offset
                                    && !(e->ip.flags & IPT_F_GOTO)) {
                                        /* Save old back ptr in next entry */
                                        struct ipt_entry *next
                                                = (void *)e + e->next_offset;
                                        next->comefrom
                                                = (void *)back - table_base;
                                        /* set back pointer to next entry */
                                        back = next;
                                }

                                e = get_entry(table_base, v);
                        } else {
                                /* Targets which reenter must return
                                   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
                                /* Canary in the first entry's comefrom:
                                 * detects a target re-entering the table. */
                                ((struct ipt_entry *)table_base)->comefrom
                                        = 0xeeeeeeec;
#endif
                                verdict = t->u.kernel.target->target(pskb,
                                                                     in, out,
                                                                     hook,
                                                                     t->u.kernel.target,
                                                                     t->data,
                                                                     userdata);

#ifdef CONFIG_NETFILTER_DEBUG
                                if (((struct ipt_entry *)table_base)->comefrom
                                    != 0xeeeeeeec
                                    && verdict == IPT_CONTINUE) {
                                        printk("Target %s reentered!\n",
                                               t->u.kernel.target->name);
                                        verdict = NF_DROP;
                                }
                                ((struct ipt_entry *)table_base)->comefrom
                                        = 0x57acc001;
#endif
                                /* Target might have changed stuff. */
                                ip = (*pskb)->nh.iph;
                                datalen = (*pskb)->len - ip->ihl * 4;

                                if (verdict == IPT_CONTINUE)
                                        e = (void *)e + e->next_offset;
                                else
                                        /* Verdict */
                                        break;
                        }
                } else {

                no_match:
                        e = (void *)e + e->next_offset;
                }
        } while (!hotdrop);

        read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
        return NF_ACCEPT;
#else
        if (hotdrop)
                return NF_DROP;
        else return verdict;
#endif
}
351
352 /* All zeroes == unconditional rule. */
353 static inline int
354 unconditional(const struct ipt_ip *ip)
355 {
356         unsigned int i;
357
358         for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
359                 if (((__u32 *)ip)[i])
360                         return 0;
361
362         return 1;
363 }
364
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/* Depth-first walk from each valid hook entry point.  Instead of
 * recursion, the back pointer for each visited entry is parked in its
 * counters.pcnt field (restored to 0 on the way back out), and bit
 * NF_IP_NUMHOOKS of comefrom marks "currently on the walk path" so a
 * revisit means a rule loop. */
static int
mark_source_chains(struct xt_table_info *newinfo,
                   unsigned int valid_hooks, void *entry0)
{
        unsigned int hook;

        /* No recursion; use packet counter to save back ptrs (reset
           to 0 as we leave), and comefrom to save source hook bitmask */
        for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
                unsigned int pos = newinfo->hook_entry[hook];
                struct ipt_entry *e
                        = (struct ipt_entry *)(entry0 + pos);

                if (!(valid_hooks & (1 << hook)))
                        continue;

                /* Set initial back pointer. */
                e->counters.pcnt = pos;

                for (;;) {
                        struct ipt_standard_target *t
                                = (void *)ipt_get_target(e);

                        /* Already on the current walk path => loop. */
                        if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
                                printk("iptables: loop hook %u pos %u %08X.\n",
                                       hook, pos, e->comefrom);
                                return 0;
                        }
                        /* Record both the originating hook and the
                         * on-path marker bit. */
                        e->comefrom
                                |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

                        /* Unconditional return/END. */
                        if (e->target_offset == sizeof(struct ipt_entry)
                            && (strcmp(t->target.u.user.name,
                                       IPT_STANDARD_TARGET) == 0)
                            && t->verdict < 0
                            && unconditional(&e->ip)) {
                                unsigned int oldpos, size;

                                /* Return: backtrack through the last
                                   big jump. */
                                do {
                                        /* Clear the on-path marker as we
                                         * unwind. */
                                        e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
                                        if (e->comefrom
                                            & (1 << NF_IP_NUMHOOKS)) {
                                                duprintf("Back unset "
                                                         "on hook %u "
                                                         "rule %u\n",
                                                         hook, pos);
                                        }
#endif
                                        oldpos = pos;
                                        /* Follow the back pointer stashed
                                         * in counters.pcnt, zeroing it. */
                                        pos = e->counters.pcnt;
                                        e->counters.pcnt = 0;

                                        /* We're at the start. */
                                        if (pos == oldpos)
                                                goto next;

                                        e = (struct ipt_entry *)
                                                (entry0 + pos);
                                } while (oldpos == pos + e->next_offset);

                                /* Move along one */
                                size = e->next_offset;
                                e = (struct ipt_entry *)
                                        (entry0 + pos + size);
                                e->counters.pcnt = pos;
                                pos += size;
                        } else {
                                int newpos = t->verdict;

                                if (strcmp(t->target.u.user.name,
                                           IPT_STANDARD_TARGET) == 0
                                    && newpos >= 0) {
                                        /* This a jump; chase it. */
                                        duprintf("Jump rule %u -> %u\n",
                                                 pos, newpos);
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
                                }
                                e = (struct ipt_entry *)
                                        (entry0 + newpos);
                                e->counters.pcnt = pos;
                                pos = newpos;
                        }
                }
                next:
                duprintf("Finished chain %u\n", hook);
        }
        return 1;
}
461
462 static inline int
463 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
464 {
465         if (i && (*i)-- == 0)
466                 return 1;
467
468         if (m->u.kernel.match->destroy)
469                 m->u.kernel.match->destroy(m->u.kernel.match, m->data,
470                                            m->u.match_size - sizeof(*m));
471         module_put(m->u.kernel.match->me);
472         return 0;
473 }
474
/* Sanity-check a standard target's verdict.  Non-negative verdicts are
 * jump offsets into the table blob and must not point past max_offset
 * (the blob size); negative verdicts encode NF_* results (see
 * ipt_do_table: verdict = -v - 1) and must not go below
 * -NF_MAX_VERDICT - 1.  Returns 1 if OK.
 * Note the first comparison happens in unsigned arithmetic; the
 * 'verdict >= 0' guard before it keeps that safe. */
static inline int
standard_check(const struct ipt_entry_target *t,
               unsigned int max_offset)
{
        struct ipt_standard_target *targ = (void *)t;

        /* Check standard info. */
        if (targ->verdict >= 0
            && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
                duprintf("ipt_standard_check: bad verdict (%i)\n",
                         targ->verdict);
                return 0;
        }
        if (targ->verdict < -NF_MAX_VERDICT - 1) {
                duprintf("ipt_standard_check: bad negative verdict (%i)\n",
                         targ->verdict);
                return 0;
        }
        return 1;
}
495
/* Resolve one match extension by name/revision (auto-loading its module
 * via request_module if needed), bind it into the rule, and validate it
 * with xt_check_match() plus the extension's own checkentry hook.
 * Increments *i on success so callers can later unwind exactly that
 * many matches.  Returns 0 or a negative errno; on failure the module
 * reference taken here is dropped again. */
static inline int
check_match(struct ipt_entry_match *m,
            const char *name,
            const struct ipt_ip *ip,
            unsigned int hookmask,
            unsigned int *i)
{
        struct ipt_match *match;
        int ret;

        match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                                                   m->u.user.revision),
                                        "ipt_%s", m->u.user.name);
        if (IS_ERR(match) || !match) {
                duprintf("check_match: `%s' not found\n", m->u.user.name);
                return match ? PTR_ERR(match) : -ENOENT;
        }
        /* From here we hold a reference on match->me; the err path
         * below releases it. */
        m->u.kernel.match = match;

        ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
                             name, hookmask, ip->proto,
                             ip->invflags & IPT_INV_PROTO);
        if (ret)
                goto err;

        if (m->u.kernel.match->checkentry
            && !m->u.kernel.match->checkentry(name, ip, match, m->data,
                                              m->u.match_size - sizeof(*m),
                                              hookmask)) {
                duprintf("ip_tables: check failed for `%s'.\n",
                         m->u.kernel.match->name);
                ret = -EINVAL;
                goto err;
        }

        (*i)++;
        return 0;
err:
        module_put(m->u.kernel.match->me);
        return ret;
}
537
538 static struct ipt_target ipt_standard_target;
539
540 static inline int
541 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
542             unsigned int *i)
543 {
544         struct ipt_entry_target *t;
545         struct ipt_target *target;
546         int ret;
547         unsigned int j;
548
549         if (!ip_checkentry(&e->ip)) {
550                 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
551                 return -EINVAL;
552         }
553
554         j = 0;
555         ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
556         if (ret != 0)
557                 goto cleanup_matches;
558
559         t = ipt_get_target(e);
560         target = try_then_request_module(xt_find_target(AF_INET,
561                                                      t->u.user.name,
562                                                      t->u.user.revision),
563                                          "ipt_%s", t->u.user.name);
564         if (IS_ERR(target) || !target) {
565                 duprintf("check_entry: `%s' not found\n", t->u.user.name);
566                 ret = target ? PTR_ERR(target) : -ENOENT;
567                 goto cleanup_matches;
568         }
569         t->u.kernel.target = target;
570
571         ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
572                               name, e->comefrom, e->ip.proto,
573                               e->ip.invflags & IPT_INV_PROTO);
574         if (ret)
575                 goto err;
576
577         if (t->u.kernel.target == &ipt_standard_target) {
578                 if (!standard_check(t, size)) {
579                         ret = -EINVAL;
580                         goto cleanup_matches;
581                 }
582         } else if (t->u.kernel.target->checkentry
583                    && !t->u.kernel.target->checkentry(name, e, target, t->data,
584                                                       t->u.target_size
585                                                       - sizeof(*t),
586                                                       e->comefrom)) {
587                 duprintf("ip_tables: check failed for `%s'.\n",
588                          t->u.kernel.target->name);
589                 ret = -EINVAL;
590                 goto err;
591         }
592
593         (*i)++;
594         return 0;
595  err:
596         module_put(t->u.kernel.target->me);
597  cleanup_matches:
598         IPT_MATCH_ITERATE(e, cleanup_match, &j);
599         return ret;
600 }
601
602 static inline int
603 check_entry_size_and_hooks(struct ipt_entry *e,
604                            struct xt_table_info *newinfo,
605                            unsigned char *base,
606                            unsigned char *limit,
607                            const unsigned int *hook_entries,
608                            const unsigned int *underflows,
609                            unsigned int *i)
610 {
611         unsigned int h;
612
613         if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
614             || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
615                 duprintf("Bad offset %p\n", e);
616                 return -EINVAL;
617         }
618
619         if (e->next_offset
620             < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
621                 duprintf("checking: element %p size %u\n",
622                          e, e->next_offset);
623                 return -EINVAL;
624         }
625
626         /* Check hooks & underflows */
627         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
628                 if ((unsigned char *)e - base == hook_entries[h])
629                         newinfo->hook_entry[h] = hook_entries[h];
630                 if ((unsigned char *)e - base == underflows[h])
631                         newinfo->underflow[h] = underflows[h];
632         }
633
634         /* FIXME: underflows must be unconditional, standard verdicts
635            < 0 (not IPT_RETURN). --RR */
636
637         /* Clear counters and comefrom */
638         e->counters = ((struct xt_counters) { 0, 0 });
639         e->comefrom = 0;
640
641         (*i)++;
642         return 0;
643 }
644
645 static inline int
646 cleanup_entry(struct ipt_entry *e, unsigned int *i)
647 {
648         struct ipt_entry_target *t;
649
650         if (i && (*i)-- == 0)
651                 return 1;
652
653         /* Cleanup all matches */
654         IPT_MATCH_ITERATE(e, cleanup_match, NULL);
655         t = ipt_get_target(e);
656         if (t->u.kernel.target->destroy)
657                 t->u.kernel.target->destroy(t->u.kernel.target, t->data,
658                                             t->u.target_size - sizeof(*t));
659         module_put(t->u.kernel.target->me);
660         return 0;
661 }
662
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* Full validation pipeline: per-entry structural checks and hook-offset
 * discovery, verification that every valid hook got an entry point and
 * underflow, loop detection (mark_source_chains), then per-rule
 * extension binding/checks; finally replicates the checked ruleset into
 * every other CPU's copy.  Returns 0 or a negative errno (-ELOOP on
 * rule loops). */
static int
translate_table(const char *name,
                unsigned int valid_hooks,
                struct xt_table_info *newinfo,
                void *entry0,
                unsigned int size,
                unsigned int number,
                const unsigned int *hook_entries,
                const unsigned int *underflows)
{
        unsigned int i;
        int ret;

        newinfo->size = size;
        newinfo->number = number;

        /* Init all hooks to impossible value. */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = 0xFFFFFFFF;
                newinfo->underflow[i] = 0xFFFFFFFF;
        }

        duprintf("translate_table: size %u\n", newinfo->size);
        i = 0;
        /* Walk through entries, checking offsets. */
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry_size_and_hooks,
                                newinfo,
                                entry0,
                                entry0 + size,
                                hook_entries, underflows, &i);
        if (ret != 0)
                return ret;

        /* Entry count found by walking must match what userspace said. */
        if (i != number) {
                duprintf("translate_table: %u not %u entries\n",
                         i, number);
                return -EINVAL;
        }

        /* Check hooks all assigned */
        for (i = 0; i < NF_IP_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(valid_hooks & (1 << i)))
                        continue;
                if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
                        duprintf("Invalid hook entry %u %u\n",
                                 i, hook_entries[i]);
                        return -EINVAL;
                }
                if (newinfo->underflow[i] == 0xFFFFFFFF) {
                        duprintf("Invalid underflow %u %u\n",
                                 i, underflows[i]);
                        return -EINVAL;
                }
        }

        if (!mark_source_chains(newinfo, valid_hooks, entry0))
                return -ELOOP;

        /* Finally, each sanity check must pass */
        i = 0;
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry, name, size, &i);

        if (ret != 0) {
                /* check_entry set up the first i entries; tear down
                 * only those. */
                IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                  cleanup_entry, &i);
                return ret;
        }

        /* And one copy for every other CPU */
        for_each_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }

        return ret;
}
744
/* Gets counters. */
/* IPT_ENTRY_ITERATE callback: accumulate entry e's byte/packet
 * counters into slot *i of 'total' and advance the slot index.
 * Returning 0 continues the walk. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
                     struct xt_counters total[],
                     unsigned int *i)
{
        ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

        (*i)++;
        return 0;
}
756
757 static inline int
758 set_entry_to_counter(const struct ipt_entry *e,
759                      struct ipt_counters total[],
760                      unsigned int *i)
761 {
762         SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
763
764         (*i)++;
765         return 0;
766 }
767
/* Collapse the per-CPU rule counters of table 't' into the flat
 * 'counters' array (one slot per rule): SET from the current CPU's
 * copy first, then ADD every other CPU's.  The caller in this file
 * serializes against the packet path by holding table->lock
 * (write side) around the call. */
static void
get_counters(const struct xt_table_info *t,
             struct xt_counters counters[])
{
        unsigned int cpu;
        unsigned int i;
        unsigned int curcpu;

        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
         * with data used by 'current' CPU
         * We dont care about preemption here.
         */
        curcpu = raw_smp_processor_id();

        i = 0;
        IPT_ENTRY_ITERATE(t->entries[curcpu],
                          t->size,
                          set_entry_to_counter,
                          counters,
                          &i);

        for_each_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
                IPT_ENTRY_ITERATE(t->entries[cpu],
                                  t->size,
                                  add_entry_to_counter,
                                  counters,
                                  &i);
        }
}
801
/* Copy the table's rules to userspace, with a fresh counter snapshot
 * and the user-visible match/target names patched back over the
 * kernel pointers in each entry.  Returns 0, -ENOMEM, or -EFAULT. */
static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void __user *userptr)
{
	unsigned int off, num, countersize;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return -ENOMEM;

	/* First, sum counters... (write lock excludes the packet path
	   updating counters mid-snapshot) */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		/* Overwrite the raw copied counters with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Walk this entry's matches, restoring the user-visible
		 * name over the kernel match pointer in each blob. */
		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same fixup for this entry's target name. */
		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
886
887 static int
888 get_entries(const struct ipt_get_entries *entries,
889             struct ipt_get_entries __user *uptr)
890 {
891         int ret;
892         struct ipt_table *t;
893
894         t = xt_find_table_lock(AF_INET, entries->name);
895         if (t && !IS_ERR(t)) {
896                 struct xt_table_info *private = t->private;
897                 duprintf("t->private->number = %u\n",
898                          private->number);
899                 if (entries->size == private->size)
900                         ret = copy_entries_to_user(private->size,
901                                                    t, uptr->entrytable);
902                 else {
903                         duprintf("get_entries: I've got %u not %u!\n",
904                                  private->size,
905                                  entries->size);
906                         ret = -EINVAL;
907                 }
908                 module_put(t->me);
909                 xt_table_unlock(t);
910         } else
911                 ret = t ? PTR_ERR(t) : -ENOENT;
912
913         return ret;
914 }
915
/* IPT_SO_SET_REPLACE: atomically swap in a whole new ruleset supplied
 * by userspace and hand the replaced rules' final counters back to
 * the caller.  Returns 0 or a negative errno. */
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct ipt_table *t;
	struct xt_table_info *newinfo, *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_entry, *loc_cpu_old_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check: the table is replicated once per CPU, so the
	 * size must leave headroom for NR_CPUS aligned copies. */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto free_newinfo;
	}

	/* Validate the ruleset and fix up kernel pointers; on success
	 * the new entries hold match/target module references that must
	 * later be dropped via cleanup_entry. */
	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo_counters;

	duprintf("ip_tables: Translated table\n");

	/* Auto-load "iptable_<name>" if the table isn't registered yet;
	 * on success we hold a reference on the table module (t->me). */
	t = try_then_request_module(xt_find_table_lock(AF_INET, tmp.name),
				    "iptable_%s", tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (tmp.valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 tmp.valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	/* Update module usage count based on number of rules: per the
	 * 2002 note in the file header, the table module holds an extra
	 * reference while the table contains user-defined rules.  The
	 * first put covers the lookup reference taken above; the second
	 * fires only on the user-rules -> no-user-rules transition. */
	oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
	xt_free_table_info(oldinfo);
	/* The replacement stays in effect even if this copy-out fails;
	 * only ret reflects the -EFAULT. */
	if (copy_to_user(tmp.counters, counters,
			 sizeof(struct xt_counters) * tmp.num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

	/* Error unwind, in reverse order of acquisition. */
 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	/* Drop the match/target references taken by translate_table(). */
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
 free_newinfo_counters:
	vfree(counters);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1019
1020 /* We're lazy, and add to the first CPU; overflow works its fey magic
1021  * and everything is OK. */
1022 static inline int
1023 add_counter_to_entry(struct ipt_entry *e,
1024                      const struct xt_counters addme[],
1025                      unsigned int *i)
1026 {
1027 #if 0
1028         duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1029                  *i,
1030                  (long unsigned int)e->counters.pcnt,
1031                  (long unsigned int)e->counters.bcnt,
1032                  (long unsigned int)addme[*i].pcnt,
1033                  (long unsigned int)addme[*i].bcnt);
1034 #endif
1035
1036         ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1037
1038         (*i)++;
1039         return 0;
1040 }
1041
/* IPT_SO_SET_ADD_COUNTERS: add a userspace-supplied array of
 * byte/packet deltas to the table's rule counters.  The number of
 * supplied counters must equal the number of rules in the table. */
static int
do_add_counters(void __user *user, unsigned int len)
{
	unsigned int i;
	struct xt_counters_info tmp, *paddc;
	struct ipt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* NOTE(review): on 32-bit, tmp.num_counters*sizeof(...) can wrap;
	 * a bogus num_counters is still rejected below against
	 * private->number, but an explicit overflow check would be
	 * safer — verify. */
	if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	/* Re-copy header plus the whole counter array in one go. */
	if (copy_from_user(paddc, user, len) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	/* Write lock keeps packet-path counter updates out while adding. */
	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != paddc->num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc->counters,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1097
1098 static int
1099 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1100 {
1101         int ret;
1102
1103         if (!capable(CAP_NET_ADMIN))
1104                 return -EPERM;
1105
1106         switch (cmd) {
1107         case IPT_SO_SET_REPLACE:
1108                 ret = do_replace(user, len);
1109                 break;
1110
1111         case IPT_SO_SET_ADD_COUNTERS:
1112                 ret = do_add_counters(user, len);
1113                 break;
1114
1115         default:
1116                 duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
1117                 ret = -EINVAL;
1118         }
1119
1120         return ret;
1121 }
1122
/* getsockopt() handler: services the IPT_SO_GET_* commands.
 * Requires CAP_NET_ADMIN; *len is validated against the exact size
 * each command expects before anything is copied. */
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	/* Table metadata: hook offsets, entry count, total size. */
	case IPT_SO_GET_INFO: {
		char name[IPT_TABLE_MAXNAMELEN];
		struct ipt_table *t;

		if (*len != sizeof(struct ipt_getinfo)) {
			duprintf("length %u != %u\n", *len,
				 sizeof(struct ipt_getinfo));
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(name, user, sizeof(name)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Force NUL termination of the userspace-supplied name. */
		name[IPT_TABLE_MAXNAMELEN-1] = '\0';

		/* Auto-load "iptable_<name>" if not yet registered. */
		t = try_then_request_module(xt_find_table_lock(AF_INET, name),
					    "iptable_%s", name);
		if (t && !IS_ERR(t)) {
			struct ipt_getinfo info;
			struct xt_table_info *private = t->private;

			info.valid_hooks = t->valid_hooks;
			memcpy(info.hook_entry, private->hook_entry,
			       sizeof(info.hook_entry));
			memcpy(info.underflow, private->underflow,
			       sizeof(info.underflow));
			info.num_entries = private->number;
			info.size = private->size;
			memcpy(info.name, name, sizeof(info.name));

			if (copy_to_user(user, &info, *len) != 0)
				ret = -EFAULT;
			else
				ret = 0;
			xt_table_unlock(t);
			module_put(t->me);
		} else
			ret = t ? PTR_ERR(t) : -ENOENT;
	}
	break;

	/* Dump the actual ruleset; size must match exactly. */
	case IPT_SO_GET_ENTRIES: {
		struct ipt_get_entries get;

		if (*len < sizeof(get)) {
			duprintf("get_entries: %u < %u\n", *len, sizeof(get));
			ret = -EINVAL;
		} else if (copy_from_user(&get, user, sizeof(get)) != 0) {
			ret = -EFAULT;
		} else if (*len != sizeof(struct ipt_get_entries) + get.size) {
			duprintf("get_entries: %u != %u\n", *len,
				 sizeof(struct ipt_get_entries) + get.size);
			ret = -EINVAL;
		} else
			ret = get_entries(&get, user);
		break;
	}

	/* Revision probe for a match or target extension; the result
	 * is returned through ret by xt_find_revision(). */
	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Auto-load the extension module "ipt_<name>" if needed. */
		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
1225
1226 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
1227 {
1228         int ret;
1229         struct xt_table_info *newinfo;
1230         static struct xt_table_info bootstrap
1231                 = { 0, 0, 0, { 0 }, { 0 }, { } };
1232         void *loc_cpu_entry;
1233
1234         newinfo = xt_alloc_table_info(repl->size);
1235         if (!newinfo)
1236                 return -ENOMEM;
1237
1238         /* choose the copy on our node/cpu
1239          * but dont care of preemption
1240          */
1241         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1242         memcpy(loc_cpu_entry, repl->entries, repl->size);
1243
1244         ret = translate_table(table->name, table->valid_hooks,
1245                               newinfo, loc_cpu_entry, repl->size,
1246                               repl->num_entries,
1247                               repl->hook_entry,
1248                               repl->underflow);
1249         if (ret != 0) {
1250                 xt_free_table_info(newinfo);
1251                 return ret;
1252         }
1253
1254         if (xt_register_table(table, &bootstrap, newinfo) != 0) {
1255                 xt_free_table_info(newinfo);
1256                 return ret;
1257         }
1258
1259         return 0;
1260 }
1261
1262 void ipt_unregister_table(struct ipt_table *table)
1263 {
1264         struct xt_table_info *private;
1265         void *loc_cpu_entry;
1266
1267         private = xt_unregister_table(table);
1268
1269         /* Decrease module usage counts and free resources */
1270         loc_cpu_entry = private->entries[raw_smp_processor_id()];
1271         IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
1272         xt_free_table_info(private);
1273 }
1274
/* Returns 1 if the type and code is matched by the range, 0 otherwise.
 * A test_type of 0xFF acts as a wildcard matching every ICMP packet;
 * invert (0 or 1) flips the final result. */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	int hit;

	if (test_type == 0xFF)
		hit = 1;
	else
		hit = type == test_type
		      && code >= min_code
		      && code <= max_code;

	return hit ^ invert;
}
1284
/* Match callback for the built-in "icmp" match: compares the packet's
 * ICMP type/code against the range configured in the rule.  Returns 1
 * on match, 0 otherwise; sets *hotdrop to drop packets whose ICMP
 * header cannot be read at all. */
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	/* Pull the ICMP header (copies into _icmph if non-linear). */
	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	/* code[0]/code[1] are the configured min/max code bounds. */
	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
1318
1319 /* Called when user tries to insert an entry of this type. */
1320 static int
1321 icmp_checkentry(const char *tablename,
1322            const void *info,
1323            const struct xt_match *match,
1324            void *matchinfo,
1325            unsigned int matchsize,
1326            unsigned int hook_mask)
1327 {
1328         const struct ipt_icmp *icmpinfo = matchinfo;
1329
1330         /* Must specify no unknown invflags */
1331         return !(icmpinfo->invflags & ~IPT_ICMP_INV);
1332 }
1333
/* The built-in targets: standard (NULL) and error. */
/* Standard target: carries only the verdict (an int) as its payload;
 * note no .target function is set. */
static struct ipt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
};
1340
/* ERROR target: payload is an error-name string; its handler is
 * ipt_error() (defined earlier in this file, outside this chunk). */
static struct ipt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};
1347
/* set/getsockopt interface: routes IPT_SO_* commands from userspace
 * (e.g. iptables(8)) into do_ipt_set_ctl()/do_ipt_get_ctl(). */
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
};
1357
/* The built-in "icmp" match, restricted to IPPROTO_ICMP packets. */
static struct ipt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
1366
1367 static int __init ip_tables_init(void)
1368 {
1369         int ret;
1370
1371         xt_proto_init(AF_INET);
1372
1373         /* Noone else will be downing sem now, so we won't sleep */
1374         xt_register_target(&ipt_standard_target);
1375         xt_register_target(&ipt_error_target);
1376         xt_register_match(&icmp_matchstruct);
1377
1378         /* Register setsockopt */
1379         ret = nf_register_sockopt(&ipt_sockopts);
1380         if (ret < 0) {
1381                 duprintf("Unable to register sockopts.\n");
1382                 return ret;
1383         }
1384
1385         printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
1386         return 0;
1387 }
1388
/* Module exit: tear everything down in exact reverse order of
 * ip_tables_init(). */
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}
1399
/* Public entry points used by the per-table modules
 * (iptable_filter, iptable_mangle, ...). */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);