[NETFILTER]: Missing check for CAP_NET_ADMIN in iptables compat layer
[safe/jmp/linux-2.6] net/ipv4/netfilter/ip_tables.c
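The commit title refers to the compat (32-bit userland on a 64-bit kernel) sockopt handlers defined further down in this file; the listing below is truncated before them. As a rough illustration of the kind of check the title describes, a minimal sketch follows — the handler name, signature, and case labels are inferred from this file's existing native and compat code as assumptions, not quoted from the actual patch:

/* Sketch only: shows the CAP_NET_ADMIN test the compat path was missing,
 * mirroring the check already performed by the native set/get handlers.
 * Names and structure here are assumed, not copied from the patch itself. */
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd,
		      void __user *user, unsigned int len)
{
	int ret;

	/* Without this, an unprivileged 32-bit process could reach the
	 * table-replace and counter-update paths via the compat layer. */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;
	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;
	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}
	return ret;
}

A matching test belongs at the top of the compat get handler as well, so that table contents cannot be read or replaced through the compat layer without CAP_NET_ADMIN.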
1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12  *      - increase module usage count as soon as we have rules inside
13  *        a table
14  * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15  *      - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
16  */
17 #include <linux/cache.h>
18 #include <linux/capability.h>
19 #include <linux/skbuff.h>
20 #include <linux/kmod.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netdevice.h>
23 #include <linux/module.h>
24 #include <linux/icmp.h>
25 #include <net/ip.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter/x_tables.h>
34 #include <linux/netfilter_ipv4/ip_tables.h>
35
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38 MODULE_DESCRIPTION("IPv4 packet filter");
39
40 /*#define DEBUG_IP_FIREWALL*/
41 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42 /*#define DEBUG_IP_FIREWALL_USER*/
43
44 #ifdef DEBUG_IP_FIREWALL
45 #define dprintf(format, args...)  printk(format , ## args)
46 #else
47 #define dprintf(format, args...)
48 #endif
49
50 #ifdef DEBUG_IP_FIREWALL_USER
51 #define duprintf(format, args...) printk(format , ## args)
52 #else
53 #define duprintf(format, args...)
54 #endif
55
56 #ifdef CONFIG_NETFILTER_DEBUG
57 #define IP_NF_ASSERT(x)                                         \
58 do {                                                            \
59         if (!(x))                                               \
60                 printk("IP_NF_ASSERT: %s:%s:%u\n",              \
61                        __FUNCTION__, __FILE__, __LINE__);       \
62 } while(0)
63 #else
64 #define IP_NF_ASSERT(x)
65 #endif
66
67 #if 0
68 /* All the better to debug you with... */
69 #define static
70 #define inline
71 #endif
72
73 /*
74    We keep a set of rules for each CPU, so we can avoid write-locking
75    them in the softirq when updating the counters and therefore
76    only need to read-lock in the softirq; doing a write_lock_bh() in user
77    context stops packets coming through and allows user context to read
78    the counters or update the rules.
79
80    Hence the start of any table is given by get_table() below.  */
81
82 /* Returns whether the packet matches the rule or not. */
83 static inline int
84 ip_packet_match(const struct iphdr *ip,
85                 const char *indev,
86                 const char *outdev,
87                 const struct ipt_ip *ipinfo,
88                 int isfrag)
89 {
90         size_t i;
91         unsigned long ret;
92
93 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
94
95         if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
96                   IPT_INV_SRCIP)
97             || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
98                      IPT_INV_DSTIP)) {
99                 dprintf("Source or dest mismatch.\n");
100
101                 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
102                         NIPQUAD(ip->saddr),
103                         NIPQUAD(ipinfo->smsk.s_addr),
104                         NIPQUAD(ipinfo->src.s_addr),
105                         ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
106                 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
107                         NIPQUAD(ip->daddr),
108                         NIPQUAD(ipinfo->dmsk.s_addr),
109                         NIPQUAD(ipinfo->dst.s_addr),
110                         ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
111                 return 0;
112         }
113
114         /* Look for ifname matches; this should unroll nicely. */
115         for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
116                 ret |= (((const unsigned long *)indev)[i]
117                         ^ ((const unsigned long *)ipinfo->iniface)[i])
118                         & ((const unsigned long *)ipinfo->iniface_mask)[i];
119         }
120
121         if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
122                 dprintf("VIA in mismatch (%s vs %s).%s\n",
123                         indev, ipinfo->iniface,
124                         ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
125                 return 0;
126         }
127
128         for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
129                 ret |= (((const unsigned long *)outdev)[i]
130                         ^ ((const unsigned long *)ipinfo->outiface)[i])
131                         & ((const unsigned long *)ipinfo->outiface_mask)[i];
132         }
133
134         if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
135                 dprintf("VIA out mismatch (%s vs %s).%s\n",
136                         outdev, ipinfo->outiface,
137                         ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
138                 return 0;
139         }
140
141         /* Check specific protocol */
142         if (ipinfo->proto
143             && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
144                 dprintf("Packet protocol %hi does not match %hi.%s\n",
145                         ip->protocol, ipinfo->proto,
146                         ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
147                 return 0;
148         }
149
150         /* If we have a fragment rule but the packet is not a fragment
151          * then we return zero */
152         if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
153                 dprintf("Fragment rule but not fragment.%s\n",
154                         ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
155                 return 0;
156         }
157
158         return 1;
159 }
160
161 static inline int
162 ip_checkentry(const struct ipt_ip *ip)
163 {
164         if (ip->flags & ~IPT_F_MASK) {
165                 duprintf("Unknown flag bits set: %08X\n",
166                          ip->flags & ~IPT_F_MASK);
167                 return 0;
168         }
169         if (ip->invflags & ~IPT_INV_MASK) {
170                 duprintf("Unknown invflag bits set: %08X\n",
171                          ip->invflags & ~IPT_INV_MASK);
172                 return 0;
173         }
174         return 1;
175 }
176
177 static unsigned int
178 ipt_error(struct sk_buff **pskb,
179           const struct net_device *in,
180           const struct net_device *out,
181           unsigned int hooknum,
182           const struct xt_target *target,
183           const void *targinfo)
184 {
185         if (net_ratelimit())
186                 printk("ip_tables: error: `%s'\n", (char *)targinfo);
187
188         return NF_DROP;
189 }
190
191 static inline
192 int do_match(struct ipt_entry_match *m,
193              const struct sk_buff *skb,
194              const struct net_device *in,
195              const struct net_device *out,
196              int offset,
197              int *hotdrop)
198 {
199         /* Stop iteration if it doesn't match */
200         if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201                                       offset, skb->nh.iph->ihl*4, hotdrop))
202                 return 1;
203         else
204                 return 0;
205 }
206
207 static inline struct ipt_entry *
208 get_entry(void *base, unsigned int offset)
209 {
210         return (struct ipt_entry *)(base + offset);
211 }
212
213 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
214 unsigned int
215 ipt_do_table(struct sk_buff **pskb,
216              unsigned int hook,
217              const struct net_device *in,
218              const struct net_device *out,
219              struct ipt_table *table)
220 {
221         static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
222         u_int16_t offset;
223         struct iphdr *ip;
224         u_int16_t datalen;
225         int hotdrop = 0;
226         /* Initializing verdict to NF_DROP keeps gcc happy. */
227         unsigned int verdict = NF_DROP;
228         const char *indev, *outdev;
229         void *table_base;
230         struct ipt_entry *e, *back;
231         struct xt_table_info *private;
232
233         /* Initialization */
234         ip = (*pskb)->nh.iph;
235         datalen = (*pskb)->len - ip->ihl * 4;
236         indev = in ? in->name : nulldevname;
237         outdev = out ? out->name : nulldevname;
238         /* We handle fragments by dealing with the first fragment as
239          * if it was a normal packet.  All other fragments are treated
240          * normally, except that they will NEVER match rules that ask
241          * things we don't know, ie. tcp syn flag or ports).  If the
242          * rule is also a fragment-specific rule, non-fragments won't
243          * match it. */
244         offset = ntohs(ip->frag_off) & IP_OFFSET;
245
246         read_lock_bh(&table->lock);
247         IP_NF_ASSERT(table->valid_hooks & (1 << hook));
248         private = table->private;
249         table_base = (void *)private->entries[smp_processor_id()];
250         e = get_entry(table_base, private->hook_entry[hook]);
251
252         /* For return from builtin chain */
253         back = get_entry(table_base, private->underflow[hook]);
254
255         do {
256                 IP_NF_ASSERT(e);
257                 IP_NF_ASSERT(back);
258                 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
259                         struct ipt_entry_target *t;
260
261                         if (IPT_MATCH_ITERATE(e, do_match,
262                                               *pskb, in, out,
263                                               offset, &hotdrop) != 0)
264                                 goto no_match;
265
266                         ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
267
268                         t = ipt_get_target(e);
269                         IP_NF_ASSERT(t->u.kernel.target);
270                         /* Standard target? */
271                         if (!t->u.kernel.target->target) {
272                                 int v;
273
274                                 v = ((struct ipt_standard_target *)t)->verdict;
275                                 if (v < 0) {
276                                         /* Pop from stack? */
277                                         if (v != IPT_RETURN) {
278                                                 verdict = (unsigned)(-v) - 1;
279                                                 break;
280                                         }
281                                         e = back;
282                                         back = get_entry(table_base,
283                                                          back->comefrom);
284                                         continue;
285                                 }
286                                 if (table_base + v != (void *)e + e->next_offset
287                                     && !(e->ip.flags & IPT_F_GOTO)) {
288                                         /* Save old back ptr in next entry */
289                                         struct ipt_entry *next
290                                                 = (void *)e + e->next_offset;
291                                         next->comefrom
292                                                 = (void *)back - table_base;
293                                         /* set back pointer to next entry */
294                                         back = next;
295                                 }
296
297                                 e = get_entry(table_base, v);
298                         } else {
299                                 /* Targets which reenter must return
300                                    abs. verdicts */
301 #ifdef CONFIG_NETFILTER_DEBUG
302                                 ((struct ipt_entry *)table_base)->comefrom
303                                         = 0xeeeeeeec;
304 #endif
305                                 verdict = t->u.kernel.target->target(pskb,
306                                                                      in, out,
307                                                                      hook,
308                                                                      t->u.kernel.target,
309                                                                      t->data);
310
311 #ifdef CONFIG_NETFILTER_DEBUG
312                                 if (((struct ipt_entry *)table_base)->comefrom
313                                     != 0xeeeeeeec
314                                     && verdict == IPT_CONTINUE) {
315                                         printk("Target %s reentered!\n",
316                                                t->u.kernel.target->name);
317                                         verdict = NF_DROP;
318                                 }
319                                 ((struct ipt_entry *)table_base)->comefrom
320                                         = 0x57acc001;
321 #endif
322                                 /* Target might have changed stuff. */
323                                 ip = (*pskb)->nh.iph;
324                                 datalen = (*pskb)->len - ip->ihl * 4;
325
326                                 if (verdict == IPT_CONTINUE)
327                                         e = (void *)e + e->next_offset;
328                                 else
329                                         /* Verdict */
330                                         break;
331                         }
332                 } else {
333
334                 no_match:
335                         e = (void *)e + e->next_offset;
336                 }
337         } while (!hotdrop);
338
339         read_unlock_bh(&table->lock);
340
341 #ifdef DEBUG_ALLOW_ALL
342         return NF_ACCEPT;
343 #else
344         if (hotdrop)
345                 return NF_DROP;
346         else return verdict;
347 #endif
348 }
349
350 /* All zeroes == unconditional rule. */
351 static inline int
352 unconditional(const struct ipt_ip *ip)
353 {
354         unsigned int i;
355
356         for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
357                 if (((__u32 *)ip)[i])
358                         return 0;
359
360         return 1;
361 }
362
363 /* Figures out from what hook each rule can be called: returns 0 if
364    there are loops.  Puts hook bitmask in comefrom. */
365 static int
366 mark_source_chains(struct xt_table_info *newinfo,
367                    unsigned int valid_hooks, void *entry0)
368 {
369         unsigned int hook;
370
371         /* No recursion; use packet counter to save back ptrs (reset
372            to 0 as we leave), and comefrom to save source hook bitmask */
373         for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
374                 unsigned int pos = newinfo->hook_entry[hook];
375                 struct ipt_entry *e
376                         = (struct ipt_entry *)(entry0 + pos);
377
378                 if (!(valid_hooks & (1 << hook)))
379                         continue;
380
381                 /* Set initial back pointer. */
382                 e->counters.pcnt = pos;
383
384                 for (;;) {
385                         struct ipt_standard_target *t
386                                 = (void *)ipt_get_target(e);
387
388                         if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
389                                 printk("iptables: loop hook %u pos %u %08X.\n",
390                                        hook, pos, e->comefrom);
391                                 return 0;
392                         }
393                         e->comefrom
394                                 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
395
396                         /* Unconditional return/END. */
397                         if (e->target_offset == sizeof(struct ipt_entry)
398                             && (strcmp(t->target.u.user.name,
399                                        IPT_STANDARD_TARGET) == 0)
400                             && t->verdict < 0
401                             && unconditional(&e->ip)) {
402                                 unsigned int oldpos, size;
403
404                                 /* Return: backtrack through the last
405                                    big jump. */
406                                 do {
407                                         e->comefrom ^= (1<<NF_IP_NUMHOOKS);
408 #ifdef DEBUG_IP_FIREWALL_USER
409                                         if (e->comefrom
410                                             & (1 << NF_IP_NUMHOOKS)) {
411                                                 duprintf("Back unset "
412                                                          "on hook %u "
413                                                          "rule %u\n",
414                                                          hook, pos);
415                                         }
416 #endif
417                                         oldpos = pos;
418                                         pos = e->counters.pcnt;
419                                         e->counters.pcnt = 0;
420
421                                         /* We're at the start. */
422                                         if (pos == oldpos)
423                                                 goto next;
424
425                                         e = (struct ipt_entry *)
426                                                 (entry0 + pos);
427                                 } while (oldpos == pos + e->next_offset);
428
429                                 /* Move along one */
430                                 size = e->next_offset;
431                                 e = (struct ipt_entry *)
432                                         (entry0 + pos + size);
433                                 e->counters.pcnt = pos;
434                                 pos += size;
435                         } else {
436                                 int newpos = t->verdict;
437
438                                 if (strcmp(t->target.u.user.name,
439                                            IPT_STANDARD_TARGET) == 0
440                                     && newpos >= 0) {
441                                         /* This a jump; chase it. */
442                                         duprintf("Jump rule %u -> %u\n",
443                                                  pos, newpos);
444                                 } else {
445                                         /* ... this is a fallthru */
446                                         newpos = pos + e->next_offset;
447                                 }
448                                 e = (struct ipt_entry *)
449                                         (entry0 + newpos);
450                                 e->counters.pcnt = pos;
451                                 pos = newpos;
452                         }
453                 }
454                 next:
455                 duprintf("Finished chain %u\n", hook);
456         }
457         return 1;
458 }
459
460 static inline int
461 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
462 {
463         if (i && (*i)-- == 0)
464                 return 1;
465
466         if (m->u.kernel.match->destroy)
467                 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
468         module_put(m->u.kernel.match->me);
469         return 0;
470 }
471
472 static inline int
473 standard_check(const struct ipt_entry_target *t,
474                unsigned int max_offset)
475 {
476         struct ipt_standard_target *targ = (void *)t;
477
478         /* Check standard info. */
479         if (targ->verdict >= 0
480             && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
481                 duprintf("ipt_standard_check: bad verdict (%i)\n",
482                          targ->verdict);
483                 return 0;
484         }
485         if (targ->verdict < -NF_MAX_VERDICT - 1) {
486                 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
487                          targ->verdict);
488                 return 0;
489         }
490         return 1;
491 }
492
493 static inline int
494 check_match(struct ipt_entry_match *m,
495             const char *name,
496             const struct ipt_ip *ip,
497             unsigned int hookmask,
498             unsigned int *i)
499 {
500         struct ipt_match *match;
501         int ret;
502
503         match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
504                                                    m->u.user.revision),
505                                         "ipt_%s", m->u.user.name);
506         if (IS_ERR(match) || !match) {
507                 duprintf("check_match: `%s' not found\n", m->u.user.name);
508                 return match ? PTR_ERR(match) : -ENOENT;
509         }
510         m->u.kernel.match = match;
511
512         ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
513                              name, hookmask, ip->proto,
514                              ip->invflags & IPT_INV_PROTO);
515         if (ret)
516                 goto err;
517
518         if (m->u.kernel.match->checkentry
519             && !m->u.kernel.match->checkentry(name, ip, match, m->data,
520                                               hookmask)) {
521                 duprintf("ip_tables: check failed for `%s'.\n",
522                          m->u.kernel.match->name);
523                 ret = -EINVAL;
524                 goto err;
525         }
526
527         (*i)++;
528         return 0;
529 err:
530         module_put(m->u.kernel.match->me);
531         return ret;
532 }
533
534 static struct ipt_target ipt_standard_target;
535
536 static inline int
537 check_entry(struct ipt_entry *e, const char *name, unsigned int size,
538             unsigned int *i)
539 {
540         struct ipt_entry_target *t;
541         struct ipt_target *target;
542         int ret;
543         unsigned int j;
544
545         if (!ip_checkentry(&e->ip)) {
546                 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
547                 return -EINVAL;
548         }
549
550         j = 0;
551         ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
552         if (ret != 0)
553                 goto cleanup_matches;
554
555         t = ipt_get_target(e);
556         target = try_then_request_module(xt_find_target(AF_INET,
557                                                      t->u.user.name,
558                                                      t->u.user.revision),
559                                          "ipt_%s", t->u.user.name);
560         if (IS_ERR(target) || !target) {
561                 duprintf("check_entry: `%s' not found\n", t->u.user.name);
562                 ret = target ? PTR_ERR(target) : -ENOENT;
563                 goto cleanup_matches;
564         }
565         t->u.kernel.target = target;
566
567         ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
568                               name, e->comefrom, e->ip.proto,
569                               e->ip.invflags & IPT_INV_PROTO);
570         if (ret)
571                 goto err;
572
573         if (t->u.kernel.target == &ipt_standard_target) {
574                 if (!standard_check(t, size)) {
575                         ret = -EINVAL;
576                         goto err;
577                 }
578         } else if (t->u.kernel.target->checkentry
579                    && !t->u.kernel.target->checkentry(name, e, target, t->data,
580                                                       e->comefrom)) {
581                 duprintf("ip_tables: check failed for `%s'.\n",
582                          t->u.kernel.target->name);
583                 ret = -EINVAL;
584                 goto err;
585         }
586
587         (*i)++;
588         return 0;
589  err:
590         module_put(t->u.kernel.target->me);
591  cleanup_matches:
592         IPT_MATCH_ITERATE(e, cleanup_match, &j);
593         return ret;
594 }
595
596 static inline int
597 check_entry_size_and_hooks(struct ipt_entry *e,
598                            struct xt_table_info *newinfo,
599                            unsigned char *base,
600                            unsigned char *limit,
601                            const unsigned int *hook_entries,
602                            const unsigned int *underflows,
603                            unsigned int *i)
604 {
605         unsigned int h;
606
607         if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
608             || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
609                 duprintf("Bad offset %p\n", e);
610                 return -EINVAL;
611         }
612
613         if (e->next_offset
614             < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
615                 duprintf("checking: element %p size %u\n",
616                          e, e->next_offset);
617                 return -EINVAL;
618         }
619
620         /* Check hooks & underflows */
621         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
622                 if ((unsigned char *)e - base == hook_entries[h])
623                         newinfo->hook_entry[h] = hook_entries[h];
624                 if ((unsigned char *)e - base == underflows[h])
625                         newinfo->underflow[h] = underflows[h];
626         }
627
628         /* FIXME: underflows must be unconditional, standard verdicts
629            < 0 (not IPT_RETURN). --RR */
630
631         /* Clear counters and comefrom */
632         e->counters = ((struct xt_counters) { 0, 0 });
633         e->comefrom = 0;
634
635         (*i)++;
636         return 0;
637 }
638
639 static inline int
640 cleanup_entry(struct ipt_entry *e, unsigned int *i)
641 {
642         struct ipt_entry_target *t;
643
644         if (i && (*i)-- == 0)
645                 return 1;
646
647         /* Cleanup all matches */
648         IPT_MATCH_ITERATE(e, cleanup_match, NULL);
649         t = ipt_get_target(e);
650         if (t->u.kernel.target->destroy)
651                 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
652         module_put(t->u.kernel.target->me);
653         return 0;
654 }
655
656 /* Checks and translates the user-supplied table segment (held in
657    newinfo) */
658 static int
659 translate_table(const char *name,
660                 unsigned int valid_hooks,
661                 struct xt_table_info *newinfo,
662                 void *entry0,
663                 unsigned int size,
664                 unsigned int number,
665                 const unsigned int *hook_entries,
666                 const unsigned int *underflows)
667 {
668         unsigned int i;
669         int ret;
670
671         newinfo->size = size;
672         newinfo->number = number;
673
674         /* Init all hooks to impossible value. */
675         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
676                 newinfo->hook_entry[i] = 0xFFFFFFFF;
677                 newinfo->underflow[i] = 0xFFFFFFFF;
678         }
679
680         duprintf("translate_table: size %u\n", newinfo->size);
681         i = 0;
682         /* Walk through entries, checking offsets. */
683         ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
684                                 check_entry_size_and_hooks,
685                                 newinfo,
686                                 entry0,
687                                 entry0 + size,
688                                 hook_entries, underflows, &i);
689         if (ret != 0)
690                 return ret;
691
692         if (i != number) {
693                 duprintf("translate_table: %u not %u entries\n",
694                          i, number);
695                 return -EINVAL;
696         }
697
698         /* Check hooks all assigned */
699         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
700                 /* Only hooks which are valid */
701                 if (!(valid_hooks & (1 << i)))
702                         continue;
703                 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
704                         duprintf("Invalid hook entry %u %u\n",
705                                  i, hook_entries[i]);
706                         return -EINVAL;
707                 }
708                 if (newinfo->underflow[i] == 0xFFFFFFFF) {
709                         duprintf("Invalid underflow %u %u\n",
710                                  i, underflows[i]);
711                         return -EINVAL;
712                 }
713         }
714
715         if (!mark_source_chains(newinfo, valid_hooks, entry0))
716                 return -ELOOP;
717
718         /* Finally, each sanity check must pass */
719         i = 0;
720         ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
721                                 check_entry, name, size, &i);
722
723         if (ret != 0) {
724                 IPT_ENTRY_ITERATE(entry0, newinfo->size,
725                                   cleanup_entry, &i);
726                 return ret;
727         }
728
729         /* And one copy for every other CPU */
730         for_each_possible_cpu(i) {
731                 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
732                         memcpy(newinfo->entries[i], entry0, newinfo->size);
733         }
734
735         return ret;
736 }
737
738 /* Gets counters. */
739 static inline int
740 add_entry_to_counter(const struct ipt_entry *e,
741                      struct xt_counters total[],
742                      unsigned int *i)
743 {
744         ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
745
746         (*i)++;
747         return 0;
748 }
749
750 static inline int
751 set_entry_to_counter(const struct ipt_entry *e,
752                      struct ipt_counters total[],
753                      unsigned int *i)
754 {
755         SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
756
757         (*i)++;
758         return 0;
759 }
760
761 static void
762 get_counters(const struct xt_table_info *t,
763              struct xt_counters counters[])
764 {
765         unsigned int cpu;
766         unsigned int i;
767         unsigned int curcpu;
768
769         /* Instead of clearing (by a previous call to memset())
770          * the counters and using adds, we set the counters
771          * with data used by 'current' CPU
772          * We don't care about preemption here.
773          */
774         curcpu = raw_smp_processor_id();
775
776         i = 0;
777         IPT_ENTRY_ITERATE(t->entries[curcpu],
778                           t->size,
779                           set_entry_to_counter,
780                           counters,
781                           &i);
782
783         for_each_possible_cpu(cpu) {
784                 if (cpu == curcpu)
785                         continue;
786                 i = 0;
787                 IPT_ENTRY_ITERATE(t->entries[cpu],
788                                   t->size,
789                                   add_entry_to_counter,
790                                   counters,
791                                   &i);
792         }
793 }
794
795 static inline struct xt_counters * alloc_counters(struct ipt_table *table)
796 {
797         unsigned int countersize;
798         struct xt_counters *counters;
799         struct xt_table_info *private = table->private;
800
801         /* We need atomic snapshot of counters: rest doesn't change
802            (other than comefrom, which userspace doesn't care
803            about). */
804         countersize = sizeof(struct xt_counters) * private->number;
805         counters = vmalloc_node(countersize, numa_node_id());
806
807         if (counters == NULL)
808                 return ERR_PTR(-ENOMEM);
809
810         /* First, sum counters... */
811         write_lock_bh(&table->lock);
812         get_counters(private, counters);
813         write_unlock_bh(&table->lock);
814
815         return counters;
816 }
817
818 static int
819 copy_entries_to_user(unsigned int total_size,
820                      struct ipt_table *table,
821                      void __user *userptr)
822 {
823         unsigned int off, num;
824         struct ipt_entry *e;
825         struct xt_counters *counters;
826         struct xt_table_info *private = table->private;
827         int ret = 0;
828         void *loc_cpu_entry;
829
830         counters = alloc_counters(table);
831         if (IS_ERR(counters))
832                 return PTR_ERR(counters);
833
834         /* choose the copy that is on our node/cpu, ...
835          * This choice is lazy (because current thread is
836          * allowed to migrate to another cpu)
837          */
838         loc_cpu_entry = private->entries[raw_smp_processor_id()];
839         /* ... then copy entire thing ... */
840         if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
841                 ret = -EFAULT;
842                 goto free_counters;
843         }
844
845         /* FIXME: use iterator macros --RR */
846         /* ... then go back and fix counters and names */
847         for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
848                 unsigned int i;
849                 struct ipt_entry_match *m;
850                 struct ipt_entry_target *t;
851
852                 e = (struct ipt_entry *)(loc_cpu_entry + off);
853                 if (copy_to_user(userptr + off
854                                  + offsetof(struct ipt_entry, counters),
855                                  &counters[num],
856                                  sizeof(counters[num])) != 0) {
857                         ret = -EFAULT;
858                         goto free_counters;
859                 }
860
861                 for (i = sizeof(struct ipt_entry);
862                      i < e->target_offset;
863                      i += m->u.match_size) {
864                         m = (void *)e + i;
865
866                         if (copy_to_user(userptr + off + i
867                                          + offsetof(struct ipt_entry_match,
868                                                     u.user.name),
869                                          m->u.kernel.match->name,
870                                          strlen(m->u.kernel.match->name)+1)
871                             != 0) {
872                                 ret = -EFAULT;
873                                 goto free_counters;
874                         }
875                 }
876
877                 t = ipt_get_target(e);
878                 if (copy_to_user(userptr + off + e->target_offset
879                                  + offsetof(struct ipt_entry_target,
880                                             u.user.name),
881                                  t->u.kernel.target->name,
882                                  strlen(t->u.kernel.target->name)+1) != 0) {
883                         ret = -EFAULT;
884                         goto free_counters;
885                 }
886         }
887
888  free_counters:
889         vfree(counters);
890         return ret;
891 }
892
893 #ifdef CONFIG_COMPAT
894 struct compat_delta {
895         struct compat_delta *next;
896         u_int16_t offset;
897         short delta;
898 };
899
900 static struct compat_delta *compat_offsets = NULL;
901
902 static int compat_add_offset(u_int16_t offset, short delta)
903 {
904         struct compat_delta *tmp;
905
906         tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
907         if (!tmp)
908                 return -ENOMEM;
909         tmp->offset = offset;
910         tmp->delta = delta;
911         if (compat_offsets) {
912                 tmp->next = compat_offsets->next;
913                 compat_offsets->next = tmp;
914         } else {
915                 compat_offsets = tmp;
916                 tmp->next = NULL;
917         }
918         return 0;
919 }
920
921 static void compat_flush_offsets(void)
922 {
923         struct compat_delta *tmp, *next;
924
925         if (compat_offsets) {
926                 for(tmp = compat_offsets; tmp; tmp = next) {
927                         next = tmp->next;
928                         kfree(tmp);
929                 }
930                 compat_offsets = NULL;
931         }
932 }
933
934 static short compat_calc_jump(u_int16_t offset)
935 {
936         struct compat_delta *tmp;
937         short delta;
938
939         for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
940                 if (tmp->offset < offset)
941                         delta += tmp->delta;
942         return delta;
943 }
944
945 static void compat_standard_from_user(void *dst, void *src)
946 {
947         int v = *(compat_int_t *)src;
948
949         if (v > 0)
950                 v += compat_calc_jump(v);
951         memcpy(dst, &v, sizeof(v));
952 }
953
954 static int compat_standard_to_user(void __user *dst, void *src)
955 {
956         compat_int_t cv = *(int *)src;
957
958         if (cv > 0)
959                 cv -= compat_calc_jump(cv);
960         return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
961 }
962
963 static inline int
964 compat_calc_match(struct ipt_entry_match *m, int * size)
965 {
966         *size += xt_compat_match_offset(m->u.kernel.match);
967         return 0;
968 }
969
970 static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
971                 void *base, struct xt_table_info *newinfo)
972 {
973         struct ipt_entry_target *t;
974         u_int16_t entry_offset;
975         int off, i, ret;
976
977         off = 0;
978         entry_offset = (void *)e - base;
979         IPT_MATCH_ITERATE(e, compat_calc_match, &off);
980         t = ipt_get_target(e);
981         off += xt_compat_target_offset(t->u.kernel.target);
982         newinfo->size -= off;
983         ret = compat_add_offset(entry_offset, off);
984         if (ret)
985                 return ret;
986
987         for (i = 0; i< NF_IP_NUMHOOKS; i++) {
988                 if (info->hook_entry[i] && (e < (struct ipt_entry *)
989                                 (base + info->hook_entry[i])))
990                         newinfo->hook_entry[i] -= off;
991                 if (info->underflow[i] && (e < (struct ipt_entry *)
992                                 (base + info->underflow[i])))
993                         newinfo->underflow[i] -= off;
994         }
995         return 0;
996 }
997
998 static int compat_table_info(struct xt_table_info *info,
999                 struct xt_table_info *newinfo)
1000 {
1001         void *loc_cpu_entry;
1002         int i;
1003
1004         if (!newinfo || !info)
1005                 return -EINVAL;
1006
1007         memset(newinfo, 0, sizeof(struct xt_table_info));
1008         newinfo->size = info->size;
1009         newinfo->number = info->number;
1010         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1011                 newinfo->hook_entry[i] = info->hook_entry[i];
1012                 newinfo->underflow[i] = info->underflow[i];
1013         }
1014         loc_cpu_entry = info->entries[raw_smp_processor_id()];
1015         return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1016                         compat_calc_entry, info, loc_cpu_entry, newinfo);
1017 }
1018 #endif
1019
1020 static int get_info(void __user *user, int *len, int compat)
1021 {
1022         char name[IPT_TABLE_MAXNAMELEN];
1023         struct ipt_table *t;
1024         int ret;
1025
1026         if (*len != sizeof(struct ipt_getinfo)) {
1027                 duprintf("length %u != %u\n", *len,
1028                         (unsigned int)sizeof(struct ipt_getinfo));
1029                 return -EINVAL;
1030         }
1031
1032         if (copy_from_user(name, user, sizeof(name)) != 0)
1033                 return -EFAULT;
1034
1035         name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1036 #ifdef CONFIG_COMPAT
1037         if (compat)
1038                 xt_compat_lock(AF_INET);
1039 #endif
1040         t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1041                         "iptable_%s", name);
1042         if (t && !IS_ERR(t)) {
1043                 struct ipt_getinfo info;
1044                 struct xt_table_info *private = t->private;
1045
1046 #ifdef CONFIG_COMPAT
1047                 if (compat) {
1048                         struct xt_table_info tmp;
1049                         ret = compat_table_info(private, &tmp);
1050                         compat_flush_offsets();
1051                         private =  &tmp;
1052                 }
1053 #endif
1054                 info.valid_hooks = t->valid_hooks;
1055                 memcpy(info.hook_entry, private->hook_entry,
1056                                 sizeof(info.hook_entry));
1057                 memcpy(info.underflow, private->underflow,
1058                                 sizeof(info.underflow));
1059                 info.num_entries = private->number;
1060                 info.size = private->size;
1061                 strcpy(info.name, name);
1062
1063                 if (copy_to_user(user, &info, *len) != 0)
1064                         ret = -EFAULT;
1065                 else
1066                         ret = 0;
1067
1068                 xt_table_unlock(t);
1069                 module_put(t->me);
1070         } else
1071                 ret = t ? PTR_ERR(t) : -ENOENT;
1072 #ifdef CONFIG_COMPAT
1073         if (compat)
1074                 xt_compat_unlock(AF_INET);
1075 #endif
1076         return ret;
1077 }
1078
1079 static int
1080 get_entries(struct ipt_get_entries __user *uptr, int *len)
1081 {
1082         int ret;
1083         struct ipt_get_entries get;
1084         struct ipt_table *t;
1085
1086         if (*len < sizeof(get)) {
1087                 duprintf("get_entries: %u < %d\n", *len,
1088                                 (unsigned int)sizeof(get));
1089                 return -EINVAL;
1090         }
1091         if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1092                 return -EFAULT;
1093         if (*len != sizeof(struct ipt_get_entries) + get.size) {
1094                 duprintf("get_entries: %u != %u\n", *len,
1095                                 (unsigned int)(sizeof(struct ipt_get_entries) +
1096                                 get.size));
1097                 return -EINVAL;
1098         }
1099
1100         t = xt_find_table_lock(AF_INET, get.name);
1101         if (t && !IS_ERR(t)) {
1102                 struct xt_table_info *private = t->private;
1103                 duprintf("t->private->number = %u\n",
1104                          private->number);
1105                 if (get.size == private->size)
1106                         ret = copy_entries_to_user(private->size,
1107                                                    t, uptr->entrytable);
1108                 else {
1109                         duprintf("get_entries: I've got %u not %u!\n",
1110                                  private->size,
1111                                  get.size);
1112                         ret = -EINVAL;
1113                 }
1114                 module_put(t->me);
1115                 xt_table_unlock(t);
1116         } else
1117                 ret = t ? PTR_ERR(t) : -ENOENT;
1118
1119         return ret;
1120 }
1121
1122 static int
1123 __do_replace(const char *name, unsigned int valid_hooks,
1124                 struct xt_table_info *newinfo, unsigned int num_counters,
1125                 void __user *counters_ptr)
1126 {
1127         int ret;
1128         struct ipt_table *t;
1129         struct xt_table_info *oldinfo;
1130         struct xt_counters *counters;
1131         void *loc_cpu_old_entry;
1132
1133         ret = 0;
1134         counters = vmalloc(num_counters * sizeof(struct xt_counters));
1135         if (!counters) {
1136                 ret = -ENOMEM;
1137                 goto out;
1138         }
1139
1140         t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1141                                     "iptable_%s", name);
1142         if (!t || IS_ERR(t)) {
1143                 ret = t ? PTR_ERR(t) : -ENOENT;
1144                 goto free_newinfo_counters_untrans;
1145         }
1146
1147         /* You lied! */
1148         if (valid_hooks != t->valid_hooks) {
1149                 duprintf("Valid hook crap: %08X vs %08X\n",
1150                          valid_hooks, t->valid_hooks);
1151                 ret = -EINVAL;
1152                 goto put_module;
1153         }
1154
1155         oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1156         if (!oldinfo)
1157                 goto put_module;
1158
1159         /* Update module usage count based on number of rules */
1160         duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1161                 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1162         if ((oldinfo->number > oldinfo->initial_entries) ||
1163             (newinfo->number <= oldinfo->initial_entries))
1164                 module_put(t->me);
1165         if ((oldinfo->number > oldinfo->initial_entries) &&
1166             (newinfo->number <= oldinfo->initial_entries))
1167                 module_put(t->me);
1168
1169         /* Get the old counters. */
1170         get_counters(oldinfo, counters);
1171         /* Decrease module usage counts and free resource */
1172         loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1173         IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1174         xt_free_table_info(oldinfo);
1175         if (copy_to_user(counters_ptr, counters,
1176                          sizeof(struct xt_counters) * num_counters) != 0)
1177                 ret = -EFAULT;
1178         vfree(counters);
1179         xt_table_unlock(t);
1180         return ret;
1181
1182  put_module:
1183         module_put(t->me);
1184         xt_table_unlock(t);
1185  free_newinfo_counters_untrans:
1186         vfree(counters);
1187  out:
1188         return ret;
1189 }
1190
1191 static int
1192 do_replace(void __user *user, unsigned int len)
1193 {
1194         int ret;
1195         struct ipt_replace tmp;
1196         struct xt_table_info *newinfo;
1197         void *loc_cpu_entry;
1198
1199         if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1200                 return -EFAULT;
1201
1202         /* Hack: Causes ipchains to give correct error msg --RR */
1203         if (len != sizeof(tmp) + tmp.size)
1204                 return -ENOPROTOOPT;
1205
1206         /* overflow check */
1207         if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1208                         SMP_CACHE_BYTES)
1209                 return -ENOMEM;
1210         if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1211                 return -ENOMEM;
1212
1213         newinfo = xt_alloc_table_info(tmp.size);
1214         if (!newinfo)
1215                 return -ENOMEM;
1216
1217         /* choose the copy that is our node/cpu */
1218         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1219         if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1220                            tmp.size) != 0) {
1221                 ret = -EFAULT;
1222                 goto free_newinfo;
1223         }
1224
1225         ret = translate_table(tmp.name, tmp.valid_hooks,
1226                               newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1227                               tmp.hook_entry, tmp.underflow);
1228         if (ret != 0)
1229                 goto free_newinfo;
1230
1231         duprintf("ip_tables: Translated table\n");
1232
1233         ret = __do_replace(tmp.name, tmp.valid_hooks,
1234                               newinfo, tmp.num_counters,
1235                               tmp.counters);
1236         if (ret)
1237                 goto free_newinfo_untrans;
1238         return 0;
1239
1240  free_newinfo_untrans:
1241         IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1242  free_newinfo:
1243         xt_free_table_info(newinfo);
1244         return ret;
1245 }
1246
1247 /* We're lazy, and add to the first CPU; overflow works its fey magic
1248  * and everything is OK. */
1249 static inline int
1250 add_counter_to_entry(struct ipt_entry *e,
1251                      const struct xt_counters addme[],
1252                      unsigned int *i)
1253 {
1254 #if 0
1255         duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1256                  *i,
1257                  (long unsigned int)e->counters.pcnt,
1258                  (long unsigned int)e->counters.bcnt,
1259                  (long unsigned int)addme[*i].pcnt,
1260                  (long unsigned int)addme[*i].bcnt);
1261 #endif
1262
1263         ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1264
1265         (*i)++;
1266         return 0;
1267 }
1268
1269 static int
1270 do_add_counters(void __user *user, unsigned int len, int compat)
1271 {
1272         unsigned int i;
1273         struct xt_counters_info tmp;
1274         struct xt_counters *paddc;
1275         unsigned int num_counters;
1276         char *name;
1277         int size;
1278         void *ptmp;
1279         struct ipt_table *t;
1280         struct xt_table_info *private;
1281         int ret = 0;
1282         void *loc_cpu_entry;
1283 #ifdef CONFIG_COMPAT
1284         struct compat_xt_counters_info compat_tmp;
1285
1286         if (compat) {
1287                 ptmp = &compat_tmp;
1288                 size = sizeof(struct compat_xt_counters_info);
1289         } else
1290 #endif
1291         {
1292                 ptmp = &tmp;
1293                 size = sizeof(struct xt_counters_info);
1294         }
1295
1296         if (copy_from_user(ptmp, user, size) != 0)
1297                 return -EFAULT;
1298
1299 #ifdef CONFIG_COMPAT
1300         if (compat) {
1301                 num_counters = compat_tmp.num_counters;
1302                 name = compat_tmp.name;
1303         } else
1304 #endif
1305         {
1306                 num_counters = tmp.num_counters;
1307                 name = tmp.name;
1308         }
1309
1310         if (len != size + num_counters * sizeof(struct xt_counters))
1311                 return -EINVAL;
1312
1313         paddc = vmalloc_node(len - size, numa_node_id());
1314         if (!paddc)
1315                 return -ENOMEM;
1316
1317         if (copy_from_user(paddc, user + size, len - size) != 0) {
1318                 ret = -EFAULT;
1319                 goto free;
1320         }
1321
1322         t = xt_find_table_lock(AF_INET, name);
1323         if (!t || IS_ERR(t)) {
1324                 ret = t ? PTR_ERR(t) : -ENOENT;
1325                 goto free;
1326         }
1327
1328         write_lock_bh(&t->lock);
1329         private = t->private;
1330         if (private->number != num_counters) {
1331                 ret = -EINVAL;
1332                 goto unlock_up_free;
1333         }
1334
1335         i = 0;
1336         /* Choose the copy that is on our node */
1337         loc_cpu_entry = private->entries[raw_smp_processor_id()];
1338         IPT_ENTRY_ITERATE(loc_cpu_entry,
1339                           private->size,
1340                           add_counter_to_entry,
1341                           paddc,
1342                           &i);
1343  unlock_up_free:
1344         write_unlock_bh(&t->lock);
1345         xt_table_unlock(t);
1346         module_put(t->me);
1347  free:
1348         vfree(paddc);
1349
1350         return ret;
1351 }
1352
1353 #ifdef CONFIG_COMPAT
1354 struct compat_ipt_replace {
1355         char                    name[IPT_TABLE_MAXNAMELEN];
1356         u32                     valid_hooks;
1357         u32                     num_entries;
1358         u32                     size;
1359         u32                     hook_entry[NF_IP_NUMHOOKS];
1360         u32                     underflow[NF_IP_NUMHOOKS];
1361         u32                     num_counters;
1362         compat_uptr_t           counters;       /* struct ipt_counters * */
1363         struct compat_ipt_entry entries[0];
1364 };
1365
1366 static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1367                 void __user **dstptr, compat_uint_t *size)
1368 {
1369         return xt_compat_match_to_user(m, dstptr, size);
1370 }
1371
1372 static int compat_copy_entry_to_user(struct ipt_entry *e,
1373                 void __user **dstptr, compat_uint_t *size)
1374 {
1375         struct ipt_entry_target *t;
1376         struct compat_ipt_entry __user *ce;
1377         u_int16_t target_offset, next_offset;
1378         compat_uint_t origsize;
1379         int ret;
1380
1381         ret = -EFAULT;
1382         origsize = *size;
1383         ce = (struct compat_ipt_entry __user *)*dstptr;
1384         if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1385                 goto out;
1386
1387         *dstptr += sizeof(struct compat_ipt_entry);
1388         ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1389         target_offset = e->target_offset - (origsize - *size);
1390         if (ret)
1391                 goto out;
1392         t = ipt_get_target(e);
1393         ret = xt_compat_target_to_user(t, dstptr, size);
1394         if (ret)
1395                 goto out;
1396         ret = -EFAULT;
1397         next_offset = e->next_offset - (origsize - *size);
1398         if (put_user(target_offset, &ce->target_offset))
1399                 goto out;
1400         if (put_user(next_offset, &ce->next_offset))
1401                 goto out;
1402         return 0;
1403 out:
1404         return ret;
1405 }
1406
1407 static inline int
1408 compat_check_calc_match(struct ipt_entry_match *m,
1409             const char *name,
1410             const struct ipt_ip *ip,
1411             unsigned int hookmask,
1412             int *size, int *i)
1413 {
1414         struct ipt_match *match;
1415
1416         match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1417                                                    m->u.user.revision),
1418                                         "ipt_%s", m->u.user.name);
1419         if (IS_ERR(match) || !match) {
1420                 duprintf("compat_check_calc_match: `%s' not found\n",
1421                                 m->u.user.name);
1422                 return match ? PTR_ERR(match) : -ENOENT;
1423         }
1424         m->u.kernel.match = match;
1425         *size += xt_compat_match_offset(match);
1426
1427         (*i)++;
1428         return 0;
1429 }
1430
1431 static inline int
1432 check_compat_entry_size_and_hooks(struct ipt_entry *e,
1433                            struct xt_table_info *newinfo,
1434                            unsigned int *size,
1435                            unsigned char *base,
1436                            unsigned char *limit,
1437                            unsigned int *hook_entries,
1438                            unsigned int *underflows,
1439                            unsigned int *i,
1440                            const char *name)
1441 {
1442         struct ipt_entry_target *t;
1443         struct ipt_target *target;
1444         u_int16_t entry_offset;
1445         int ret, off, h, j;
1446
1447         duprintf("check_compat_entry_size_and_hooks %p\n", e);
1448         if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1449             || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1450                 duprintf("Bad offset %p, limit = %p\n", e, limit);
1451                 return -EINVAL;
1452         }
1453
1454         if (e->next_offset < sizeof(struct compat_ipt_entry) +
1455                         sizeof(struct compat_xt_entry_target)) {
1456                 duprintf("checking: element %p size %u\n",
1457                          e, e->next_offset);
1458                 return -EINVAL;
1459         }
1460
1461         if (!ip_checkentry(&e->ip)) {
1462                 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
1463                 return -EINVAL;
1464         }
1465
1466         off = 0;
1467         entry_offset = (void *)e - (void *)base;
1468         j = 0;
1469         ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1470                         e->comefrom, &off, &j);
1471         if (ret != 0)
1472                 goto cleanup_matches;
1473
1474         t = ipt_get_target(e);
1475         target = try_then_request_module(xt_find_target(AF_INET,
1476                                                      t->u.user.name,
1477                                                      t->u.user.revision),
1478                                          "ipt_%s", t->u.user.name);
1479         if (IS_ERR(target) || !target) {
1480                 duprintf("check_entry: `%s' not found\n", t->u.user.name);
1481                 ret = target ? PTR_ERR(target) : -ENOENT;
1482                 goto cleanup_matches;
1483         }
1484         t->u.kernel.target = target;
1485
1486         off += xt_compat_target_offset(target);
1487         *size += off;
1488         ret = compat_add_offset(entry_offset, off);
1489         if (ret)
1490                 goto out;
1491
1492         /* Check hooks & underflows */
1493         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1494                 if ((unsigned char *)e - base == hook_entries[h])
1495                         newinfo->hook_entry[h] = hook_entries[h];
1496                 if ((unsigned char *)e - base == underflows[h])
1497                         newinfo->underflow[h] = underflows[h];
1498         }
1499
1500         /* Clear counters and comefrom */
1501         e->counters = ((struct ipt_counters) { 0, 0 });
1502         e->comefrom = 0;
1503
1504         (*i)++;
1505         return 0;
1506
1507 out:
1508         module_put(t->u.kernel.target->me);
1509 cleanup_matches:
1510         IPT_MATCH_ITERATE(e, cleanup_match, &j);
1511         return ret;
1512 }
1513
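/* Second pass helper: expand one match from its compat layout into the
 * native layout at *dstptr, then run xt_check_match() and the match's
 * own checkentry() hook on the converted data. */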
1514 static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1515         void **dstptr, compat_uint_t *size, const char *name,
1516         const struct ipt_ip *ip, unsigned int hookmask, int *i)
1517 {
1518         struct ipt_entry_match *dm;
1519         struct ipt_match *match;
1520         int ret;
1521
1522         dm = (struct ipt_entry_match *)*dstptr;
1523         match = m->u.kernel.match;
1524         xt_compat_match_from_user(m, dstptr, size);
1525
1526         ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
1527                              name, hookmask, ip->proto,
1528                              ip->invflags & IPT_INV_PROTO);
1529         if (ret)
1530                 goto err;
1531
1532         if (m->u.kernel.match->checkentry
1533             && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
1534                                               hookmask)) {
1535                 duprintf("ip_tables: check failed for `%s'.\n",
1536                          m->u.kernel.match->name);
1537                 ret = -EINVAL;
1538                 goto err;
1539         }
1540         (*i)++;
1541         return 0;
1542
1543 err:
1544         module_put(m->u.kernel.match->me);
1545         return ret;
1546 }
1547
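/* Second pass: copy a compat entry into its native ipt_entry form,
 * adjust target_offset/next_offset and the recorded hook entries and
 * underflows for the size growth, then validate the target. */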
1548 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1549         unsigned int *size, const char *name,
1550         struct xt_table_info *newinfo, unsigned char *base)
1551 {
1552         struct ipt_entry_target *t;
1553         struct ipt_target *target;
1554         struct ipt_entry *de;
1555         unsigned int origsize;
1556         int ret, h, j;
1557
1558         ret = 0;
1559         origsize = *size;
1560         de = (struct ipt_entry *)*dstptr;
1561         memcpy(de, e, sizeof(struct ipt_entry));
1562
1563         j = 0;
1564         *dstptr += sizeof(struct compat_ipt_entry);
1565         ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1566                         name, &de->ip, de->comefrom, &j);
1567         if (ret)
1568                 goto cleanup_matches;
1569         de->target_offset = e->target_offset - (origsize - *size);
1570         t = ipt_get_target(e);
1571         target = t->u.kernel.target;
1572         xt_compat_target_from_user(t, dstptr, size);
1573
1574         de->next_offset = e->next_offset - (origsize - *size);
1575         for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1576                 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1577                         newinfo->hook_entry[h] -= origsize - *size;
1578                 if ((unsigned char *)de - base < newinfo->underflow[h])
1579                         newinfo->underflow[h] -= origsize - *size;
1580         }
1581
1582         t = ipt_get_target(de);
1583         target = t->u.kernel.target;
1584         ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
1585                               name, e->comefrom, e->ip.proto,
1586                               e->ip.invflags & IPT_INV_PROTO);
1587         if (ret)
1588                 goto err;
1589
1590         ret = -EINVAL;
1591         if (t->u.kernel.target == &ipt_standard_target) {
1592                 if (!standard_check(t, *size))
1593                         goto err;
1594         } else if (t->u.kernel.target->checkentry
1595                    && !t->u.kernel.target->checkentry(name, de, target,
1596                                                       t->data, de->comefrom)) {
1597                 duprintf("ip_tables: compat: check failed for `%s'.\n",
1598                          t->u.kernel.target->name);
1599                 goto err;
1600         }
1601         ret = 0;
1602         return ret;
1603
1604 err:
1605         module_put(t->u.kernel.target->me);
1606 cleanup_matches:
1607         IPT_MATCH_ITERATE(e, cleanup_match, &j);
1608         return ret;
1609 }
1610
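/* Translate a 32-bit ruleset into the native layout: pass one walks the
 * compat entries to check them and compute the native size, pass two
 * copies them into a freshly allocated xt_table_info, fixes up the hook
 * entry points and replicates the result for every possible CPU. */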
1611 static int
1612 translate_compat_table(const char *name,
1613                 unsigned int valid_hooks,
1614                 struct xt_table_info **pinfo,
1615                 void **pentry0,
1616                 unsigned int total_size,
1617                 unsigned int number,
1618                 unsigned int *hook_entries,
1619                 unsigned int *underflows)
1620 {
1621         unsigned int i;
1622         struct xt_table_info *newinfo, *info;
1623         void *pos, *entry0, *entry1;
1624         unsigned int size;
1625         int ret;
1626
1627         info = *pinfo;
1628         entry0 = *pentry0;
1629         size = total_size;
1630         info->number = number;
1631
1632         /* Init all hooks to impossible value. */
1633         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1634                 info->hook_entry[i] = 0xFFFFFFFF;
1635                 info->underflow[i] = 0xFFFFFFFF;
1636         }
1637
1638         duprintf("translate_compat_table: size %u\n", info->size);
1639         i = 0;
1640         xt_compat_lock(AF_INET);
1641         /* Walk through entries, checking offsets. */
1642         ret = IPT_ENTRY_ITERATE(entry0, total_size,
1643                                 check_compat_entry_size_and_hooks,
1644                                 info, &size, entry0,
1645                                 entry0 + total_size,
1646                                 hook_entries, underflows, &i, name);
1647         if (ret != 0)
1648                 goto out_unlock;
1649
1650         ret = -EINVAL;
1651         if (i != number) {
1652                 duprintf("translate_compat_table: %u not %u entries\n",
1653                          i, number);
1654                 goto out_unlock;
1655         }
1656
1657         /* Check that all valid hooks were assigned */
1658         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1659                 /* Only hooks which are valid */
1660                 if (!(valid_hooks & (1 << i)))
1661                         continue;
1662                 if (info->hook_entry[i] == 0xFFFFFFFF) {
1663                         duprintf("Invalid hook entry %u %u\n",
1664                                  i, hook_entries[i]);
1665                         goto out_unlock;
1666                 }
1667                 if (info->underflow[i] == 0xFFFFFFFF) {
1668                         duprintf("Invalid underflow %u %u\n",
1669                                  i, underflows[i]);
1670                         goto out_unlock;
1671                 }
1672         }
1673
1674         ret = -ENOMEM;
1675         newinfo = xt_alloc_table_info(size);
1676         if (!newinfo)
1677                 goto out_unlock;
1678
1679         newinfo->number = number;
1680         for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1681                 newinfo->hook_entry[i] = info->hook_entry[i];
1682                 newinfo->underflow[i] = info->underflow[i];
1683         }
1684         entry1 = newinfo->entries[raw_smp_processor_id()];
1685         pos = entry1;
1686         size =  total_size;
1687         ret = IPT_ENTRY_ITERATE(entry0, total_size,
1688                         compat_copy_entry_from_user, &pos, &size,
1689                         name, newinfo, entry1);
1690         compat_flush_offsets();
1691         xt_compat_unlock(AF_INET);
1692         if (ret)
1693                 goto free_newinfo;
1694
1695         ret = -ELOOP;
1696         if (!mark_source_chains(newinfo, valid_hooks, entry1))
1697                 goto free_newinfo;
1698
1699         /* And one copy for every other CPU */
1700         for_each_possible_cpu(i)
1701                 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1702                         memcpy(newinfo->entries[i], entry1, newinfo->size);
1703
1704         *pinfo = newinfo;
1705         *pentry0 = entry1;
1706         xt_free_table_info(info);
1707         return 0;
1708
1709 free_newinfo:
1710         xt_free_table_info(newinfo);
1711 out:
1712         return ret;
1713 out_unlock:
1714         xt_compat_unlock(AF_INET);
1715         goto out;
1716 }
1717
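/* Compat counterpart of do_replace(): copy the 32-bit ipt_replace header
 * and rule blob from userspace, translate the table and hand the native
 * result to __do_replace(). */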
1718 static int
1719 compat_do_replace(void __user *user, unsigned int len)
1720 {
1721         int ret;
1722         struct compat_ipt_replace tmp;
1723         struct xt_table_info *newinfo;
1724         void *loc_cpu_entry;
1725
1726         if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1727                 return -EFAULT;
1728
1729         /* Hack: Causes ipchains to give correct error msg --RR */
1730         if (len != sizeof(tmp) + tmp.size)
1731                 return -ENOPROTOOPT;
1732
1733         /* overflow check */
1734         if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1735                         SMP_CACHE_BYTES)
1736                 return -ENOMEM;
1737         if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1738                 return -ENOMEM;
1739
1740         newinfo = xt_alloc_table_info(tmp.size);
1741         if (!newinfo)
1742                 return -ENOMEM;
1743
1744         /* choose the copy that is on our node/cpu */
1745         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1746         if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1747                            tmp.size) != 0) {
1748                 ret = -EFAULT;
1749                 goto free_newinfo;
1750         }
1751
1752         ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1753                               &newinfo, &loc_cpu_entry, tmp.size,
1754                               tmp.num_entries, tmp.hook_entry, tmp.underflow);
1755         if (ret != 0)
1756                 goto free_newinfo;
1757
1758         duprintf("compat_do_replace: Translated table\n");
1759
1760         ret = __do_replace(tmp.name, tmp.valid_hooks,
1761                               newinfo, tmp.num_counters,
1762                               compat_ptr(tmp.counters));
1763         if (ret)
1764                 goto free_newinfo_untrans;
1765         return 0;
1766
1767  free_newinfo_untrans:
1768         IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1769  free_newinfo:
1770         xt_free_table_info(newinfo);
1771         return ret;
1772 }
1773
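/* setsockopt() entry point for 32-bit callers; like the native handler
 * it refuses anything short of CAP_NET_ADMIN before dispatching. */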
1774 static int
1775 compat_do_ipt_set_ctl(struct sock *sk,  int cmd, void __user *user,
1776                 unsigned int len)
1777 {
1778         int ret;
1779
1780         if (!capable(CAP_NET_ADMIN))
1781                 return -EPERM;
1782
1783         switch (cmd) {
1784         case IPT_SO_SET_REPLACE:
1785                 ret = compat_do_replace(user, len);
1786                 break;
1787
1788         case IPT_SO_SET_ADD_COUNTERS:
1789                 ret = do_add_counters(user, len, 1);
1790                 break;
1791
1792         default:
1793                 duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
1794                 ret = -EINVAL;
1795         }
1796
1797         return ret;
1798 }
1799
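/* Layout of struct ipt_get_entries as a 32-bit task sees it. */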
1800 struct compat_ipt_get_entries
1801 {
1802         char name[IPT_TABLE_MAXNAMELEN];
1803         compat_uint_t size;
1804         struct compat_ipt_entry entrytable[0];
1805 };
1806
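/* Dump the table to a 32-bit reader: shrink each entry with
 * compat_copy_entry_to_user(), then walk the result again to fill in
 * the counters and rewrite match/target names at their compat offsets. */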
1807 static int compat_copy_entries_to_user(unsigned int total_size,
1808                      struct ipt_table *table, void __user *userptr)
1809 {
1810         unsigned int off, num;
1811         struct compat_ipt_entry e;
1812         struct xt_counters *counters;
1813         struct xt_table_info *private = table->private;
1814         void __user *pos;
1815         unsigned int size;
1816         int ret = 0;
1817         void *loc_cpu_entry;
1818
1819         counters = alloc_counters(table);
1820         if (IS_ERR(counters))
1821                 return PTR_ERR(counters);
1822
1823         /* choose the copy that is on our node/cpu, ...
1824          * This choice is lazy (because the current thread is
1825          * allowed to migrate to another cpu)
1826          */
1827         loc_cpu_entry = private->entries[raw_smp_processor_id()];
1828         pos = userptr;
1829         size = total_size;
1830         ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1831                         compat_copy_entry_to_user, &pos, &size);
1832         if (ret)
1833                 goto free_counters;
1834
1835         /* ... then go back and fix counters and names */
1836         for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1837                 unsigned int i;
1838                 struct ipt_entry_match m;
1839                 struct ipt_entry_target t;
1840
1841                 ret = -EFAULT;
1842                 if (copy_from_user(&e, userptr + off,
1843                                         sizeof(struct compat_ipt_entry)))
1844                         goto free_counters;
1845                 if (copy_to_user(userptr + off +
1846                         offsetof(struct compat_ipt_entry, counters),
1847                          &counters[num], sizeof(counters[num])))
1848                         goto free_counters;
1849
1850                 for (i = sizeof(struct compat_ipt_entry);
1851                                 i < e.target_offset; i += m.u.match_size) {
1852                         if (copy_from_user(&m, userptr + off + i,
1853                                         sizeof(struct ipt_entry_match)))
1854                                 goto free_counters;
1855                         if (copy_to_user(userptr + off + i +
1856                                 offsetof(struct ipt_entry_match, u.user.name),
1857                                 m.u.kernel.match->name,
1858                                 strlen(m.u.kernel.match->name) + 1))
1859                                 goto free_counters;
1860                 }
1861
1862                 if (copy_from_user(&t, userptr + off + e.target_offset,
1863                                         sizeof(struct ipt_entry_target)))
1864                         goto free_counters;
1865                 if (copy_to_user(userptr + off + e.target_offset +
1866                         offsetof(struct ipt_entry_target, u.user.name),
1867                         t.u.kernel.target->name,
1868                         strlen(t.u.kernel.target->name) + 1))
1869                         goto free_counters;
1870         }
1871         ret = 0;
1872 free_counters:
1873         vfree(counters);
1874         return ret;
1875 }
1876
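/* IPT_SO_GET_ENTRIES for compat callers: check the requested size against
 * compat_table_info() before copying the entries out. */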
1877 static int
1878 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1879 {
1880         int ret;
1881         struct compat_ipt_get_entries get;
1882         struct ipt_table *t;
1883
1884
1885         if (*len < sizeof(get)) {
1886                 duprintf("compat_get_entries: %u < %u\n",
1887                                 *len, (unsigned int)sizeof(get));
1888                 return -EINVAL;
1889         }
1890
1891         if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1892                 return -EFAULT;
1893
1894         if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1895                 duprintf("compat_get_entries: %u != %u\n", *len,
1896                         (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1897                         get.size));
1898                 return -EINVAL;
1899         }
1900
1901         xt_compat_lock(AF_INET);
1902         t = xt_find_table_lock(AF_INET, get.name);
1903         if (t && !IS_ERR(t)) {
1904                 struct xt_table_info *private = t->private;
1905                 struct xt_table_info info;
1906                 duprintf("t->private->number = %u\n",
1907                          private->number);
1908                 ret = compat_table_info(private, &info);
1909                 if (!ret && get.size == info.size) {
1910                         ret = compat_copy_entries_to_user(private->size,
1911                                                    t, uptr->entrytable);
1912                 } else if (!ret) {
1913                         duprintf("compat_get_entries: I've got %u not %u!\n",
1914                                  private->size,
1915                                  get.size);
1916                         ret = -EINVAL;
1917                 }
1918                 compat_flush_offsets();
1919                 module_put(t->me);
1920                 xt_table_unlock(t);
1921         } else
1922                 ret = t ? PTR_ERR(t) : -ENOENT;
1923
1924         xt_compat_unlock(AF_INET);
1925         return ret;
1926 }
1927
1928 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1929
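/* getsockopt() entry point for 32-bit callers; also gated on
 * CAP_NET_ADMIN, falling back to do_ipt_get_ctl() for commands that
 * need no compat translation. */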
1930 static int
1931 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1932 {
1933         int ret;
1934
1935         if (!capable(CAP_NET_ADMIN))
1936                 return -EPERM;
1937
1938         switch (cmd) {
1939         case IPT_SO_GET_INFO:
1940                 ret = get_info(user, len, 1);
1941                 break;
1942         case IPT_SO_GET_ENTRIES:
1943                 ret = compat_get_entries(user, len);
1944                 break;
1945         default:
1946                 ret = do_ipt_get_ctl(sk, cmd, user, len);
1947         }
1948         return ret;
1949 }
1950 #endif
1951
1952 static int
1953 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1954 {
1955         int ret;
1956
1957         if (!capable(CAP_NET_ADMIN))
1958                 return -EPERM;
1959
1960         switch (cmd) {
1961         case IPT_SO_SET_REPLACE:
1962                 ret = do_replace(user, len);
1963                 break;
1964
1965         case IPT_SO_SET_ADD_COUNTERS:
1966                 ret = do_add_counters(user, len, 0);
1967                 break;
1968
1969         default:
1970                 duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
1971                 ret = -EINVAL;
1972         }
1973
1974         return ret;
1975 }
1976
1977 static int
1978 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1979 {
1980         int ret;
1981
1982         if (!capable(CAP_NET_ADMIN))
1983                 return -EPERM;
1984
1985         switch (cmd) {
1986         case IPT_SO_GET_INFO:
1987                 ret = get_info(user, len, 0);
1988                 break;
1989
1990         case IPT_SO_GET_ENTRIES:
1991                 ret = get_entries(user, len);
1992                 break;
1993
1994         case IPT_SO_GET_REVISION_MATCH:
1995         case IPT_SO_GET_REVISION_TARGET: {
1996                 struct ipt_get_revision rev;
1997                 int target;
1998
1999                 if (*len != sizeof(rev)) {
2000                         ret = -EINVAL;
2001                         break;
2002                 }
2003                 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2004                         ret = -EFAULT;
2005                         break;
2006                 }
2007
2008                 if (cmd == IPT_SO_GET_REVISION_TARGET)
2009                         target = 1;
2010                 else
2011                         target = 0;
2012
2013                 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2014                                                          rev.revision,
2015                                                          target, &ret),
2016                                         "ipt_%s", rev.name);
2017                 break;
2018         }
2019
2020         default:
2021                 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2022                 ret = -EINVAL;
2023         }
2024
2025         return ret;
2026 }
2027
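/* Register a table: allocate per-CPU entry copies, translate the initial
 * ruleset with translate_table() and hand the result to xt_register_table(). */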
2028 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2029 {
2030         int ret;
2031         struct xt_table_info *newinfo;
2032         static struct xt_table_info bootstrap
2033                 = { 0, 0, 0, { 0 }, { 0 }, { } };
2034         void *loc_cpu_entry;
2035
2036         newinfo = xt_alloc_table_info(repl->size);
2037         if (!newinfo)
2038                 return -ENOMEM;
2039
2040         /* choose the copy on our node/cpu
2041          * but don't care about preemption
2042          */
2043         loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2044         memcpy(loc_cpu_entry, repl->entries, repl->size);
2045
2046         ret = translate_table(table->name, table->valid_hooks,
2047                               newinfo, loc_cpu_entry, repl->size,
2048                               repl->num_entries,
2049                               repl->hook_entry,
2050                               repl->underflow);
2051         if (ret != 0) {
2052                 xt_free_table_info(newinfo);
2053                 return ret;
2054         }
2055
2056         ret = xt_register_table(table, &bootstrap, newinfo);
2057         if (ret != 0) {
2058                 xt_free_table_info(newinfo);
2059                 return ret;
2060         }
2061
2062         return 0;
2063 }
2064
2065 void ipt_unregister_table(struct ipt_table *table)
2066 {
2067         struct xt_table_info *private;
2068         void *loc_cpu_entry;
2069
2070         private = xt_unregister_table(table);
2071
2072         /* Decrease module usage counts and free resources */
2073         loc_cpu_entry = private->entries[raw_smp_processor_id()];
2074         IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2075         xt_free_table_info(private);
2076 }
2077
2078 /* Returns 1 if the type and code are matched by the range, 0 otherwise */
2079 static inline int
2080 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2081                      u_int8_t type, u_int8_t code,
2082                      int invert)
2083 {
2084         return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
2085                 ^ invert;
2086 }
2087
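/* Match an ICMP packet's type and code against an ipt_icmp rule,
 * dropping truncated headers via *hotdrop. */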
2088 static int
2089 icmp_match(const struct sk_buff *skb,
2090            const struct net_device *in,
2091            const struct net_device *out,
2092            const struct xt_match *match,
2093            const void *matchinfo,
2094            int offset,
2095            unsigned int protoff,
2096            int *hotdrop)
2097 {
2098         struct icmphdr _icmph, *ic;
2099         const struct ipt_icmp *icmpinfo = matchinfo;
2100
2101         /* Must not be a fragment. */
2102         if (offset)
2103                 return 0;
2104
2105         ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2106         if (ic == NULL) {
2107                 /* We've been asked to examine this packet, and we
2108                  * can't.  Hence, no choice but to drop.
2109                  */
2110                 duprintf("Dropping evil ICMP tinygram.\n");
2111                 *hotdrop = 1;
2112                 return 0;
2113         }
2114
2115         return icmp_type_code_match(icmpinfo->type,
2116                                     icmpinfo->code[0],
2117                                     icmpinfo->code[1],
2118                                     ic->type, ic->code,
2119                                     !!(icmpinfo->invflags&IPT_ICMP_INV));
2120 }
2121
2122 /* Called when user tries to insert an entry of this type. */
2123 static int
2124 icmp_checkentry(const char *tablename,
2125            const void *info,
2126            const struct xt_match *match,
2127            void *matchinfo,
2128            unsigned int hook_mask)
2129 {
2130         const struct ipt_icmp *icmpinfo = matchinfo;
2131
2132         /* Must specify no unknown invflags */
2133         return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2134 }
2135
2136 /* The built-in targets: standard (NULL) and error. */
2137 static struct ipt_target ipt_standard_target = {
2138         .name           = IPT_STANDARD_TARGET,
2139         .targetsize     = sizeof(int),
2140         .family         = AF_INET,
2141 #ifdef CONFIG_COMPAT
2142         .compatsize     = sizeof(compat_int_t),
2143         .compat_from_user = compat_standard_from_user,
2144         .compat_to_user = compat_standard_to_user,
2145 #endif
2146 };
2147
2148 static struct ipt_target ipt_error_target = {
2149         .name           = IPT_ERROR_TARGET,
2150         .target         = ipt_error,
2151         .targetsize     = IPT_FUNCTION_MAXNAMELEN,
2152         .family         = AF_INET,
2153 };
2154
2155 static struct nf_sockopt_ops ipt_sockopts = {
2156         .pf             = PF_INET,
2157         .set_optmin     = IPT_BASE_CTL,
2158         .set_optmax     = IPT_SO_SET_MAX+1,
2159         .set            = do_ipt_set_ctl,
2160 #ifdef CONFIG_COMPAT
2161         .compat_set     = compat_do_ipt_set_ctl,
2162 #endif
2163         .get_optmin     = IPT_BASE_CTL,
2164         .get_optmax     = IPT_SO_GET_MAX+1,
2165         .get            = do_ipt_get_ctl,
2166 #ifdef CONFIG_COMPAT
2167         .compat_get     = compat_do_ipt_get_ctl,
2168 #endif
2169 };
2170
2171 static struct ipt_match icmp_matchstruct = {
2172         .name           = "icmp",
2173         .match          = icmp_match,
2174         .matchsize      = sizeof(struct ipt_icmp),
2175         .proto          = IPPROTO_ICMP,
2176         .family         = AF_INET,
2177         .checkentry     = icmp_checkentry,
2178 };
2179
2180 static int __init ip_tables_init(void)
2181 {
2182         int ret;
2183
2184         ret = xt_proto_init(AF_INET);
2185         if (ret < 0)
2186                 goto err1;
2187
2188         /* No one else will be downing the sem now, so we won't sleep */
2189         ret = xt_register_target(&ipt_standard_target);
2190         if (ret < 0)
2191                 goto err2;
2192         ret = xt_register_target(&ipt_error_target);
2193         if (ret < 0)
2194                 goto err3;
2195         ret = xt_register_match(&icmp_matchstruct);
2196         if (ret < 0)
2197                 goto err4;
2198
2199         /* Register setsockopt */
2200         ret = nf_register_sockopt(&ipt_sockopts);
2201         if (ret < 0)
2202                 goto err5;
2203
2204         printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2205         return 0;
2206
2207 err5:
2208         xt_unregister_match(&icmp_matchstruct);
2209 err4:
2210         xt_unregister_target(&ipt_error_target);
2211 err3:
2212         xt_unregister_target(&ipt_standard_target);
2213 err2:
2214         xt_proto_fini(AF_INET);
2215 err1:
2216         return ret;
2217 }
2218
2219 static void __exit ip_tables_fini(void)
2220 {
2221         nf_unregister_sockopt(&ipt_sockopts);
2222
2223         xt_unregister_match(&icmp_matchstruct);
2224         xt_unregister_target(&ipt_error_target);
2225         xt_unregister_target(&ipt_standard_target);
2226
2227         xt_proto_fini(AF_INET);
2228 }
2229
2230 EXPORT_SYMBOL(ipt_register_table);
2231 EXPORT_SYMBOL(ipt_unregister_table);
2232 EXPORT_SYMBOL(ipt_do_table);
2233 module_init(ip_tables_init);
2234 module_exit(ip_tables_fini);