[NETFILTER]: more sparse fixes
net/netfilter/xt_hashlimit.c
1 /* iptables match extension to limit the number of packets per second
2  * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
3  *
4  * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
5  *
6  * $Id: ipt_hashlimit.c 3244 2004-10-20 16:24:29Z laforge@netfilter.org $
7  *
8  * Development of this code was funded by Astaro AG, http://www.astaro.com/
9  */
10 #include <linux/module.h>
11 #include <linux/spinlock.h>
12 #include <linux/random.h>
13 #include <linux/jhash.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
16 #include <linux/proc_fs.h>
17 #include <linux/seq_file.h>
18 #include <linux/list.h>
19 #include <linux/skbuff.h>
20 #include <linux/mm.h>
21 #include <linux/in.h>
22 #include <linux/ip.h>
23 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
24 #include <linux/ipv6.h>
25 #include <net/ipv6.h>
26 #endif
27
28 #include <net/net_namespace.h>
29
30 #include <linux/netfilter/x_tables.h>
31 #include <linux/netfilter_ipv4/ip_tables.h>
32 #include <linux/netfilter_ipv6/ip6_tables.h>
33 #include <linux/netfilter/xt_hashlimit.h>
34 #include <linux/mutex.h>
35
36 MODULE_LICENSE("GPL");
37 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
38 MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
39 MODULE_ALIAS("ipt_hashlimit");
40 MODULE_ALIAS("ip6t_hashlimit");
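/*
 * Illustrative usage (a sketch only; the option names below belong to the
 * userspace iptables/ip6tables "hashlimit" match extension, not to this
 * file): rate-limit new SSH connections to 5 per second per source address:
 *
 *   iptables -A INPUT -p tcp --dport 22 --syn \
 *            -m hashlimit --hashlimit 5/sec --hashlimit-burst 10 \
 *            --hashlimit-mode srcip --hashlimit-name ssh \
 *            -j ACCEPT
 *
 * This creates one token bucket per source IP, tracked in the hash table
 * implemented below and exported via /proc/net/ipt_hashlimit/ssh.
 */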
41
42 /* need to declare this at the top */
43 static struct proc_dir_entry *hashlimit_procdir4;
44 static struct proc_dir_entry *hashlimit_procdir6;
45 static const struct file_operations dl_file_ops;
46
47 /* hash table crap */
48 struct dsthash_dst {
49         union {
50                 struct {
51                         __be32 src;
52                         __be32 dst;
53                 } ip;
54 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
55                 struct {
56                         __be32 src[4];
57                         __be32 dst[4];
58                 } ip6;
59 #endif
60         } addr;
61         __be16 src_port;
62         __be16 dst_port;
63 };
64
65 struct dsthash_ent {
66         /* static / read-only parts in the beginning */
67         struct hlist_node node;
68         struct dsthash_dst dst;
69
70         /* modified structure members in the end */
71         unsigned long expires;          /* precalculated expiry time */
72         struct {
73                 unsigned long prev;     /* last modification */
74                 u_int32_t credit;
75                 u_int32_t credit_cap, cost;
76         } rateinfo;
77 };
78
79 struct xt_hashlimit_htable {
80         struct hlist_node node;         /* global list of all htables */
81         atomic_t use;
82         int family;
83
84         struct hashlimit_cfg cfg;       /* config */
85
86         /* used internally */
87         spinlock_t lock;                /* lock for list_head */
88         u_int32_t rnd;                  /* random seed for hash */
89         int rnd_initialized;
90         unsigned int count;             /* number entries in table */
91         struct timer_list timer;        /* timer for gc */
92
93         /* seq_file stuff */
94         struct proc_dir_entry *pde;
95
96         struct hlist_head hash[0];      /* hashtable itself */
97 };
98
99 static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
100 static DEFINE_MUTEX(hlimit_mutex);      /* additional checkentry protection */
101 static HLIST_HEAD(hashlimit_htables);
102 static struct kmem_cache *hashlimit_cachep __read_mostly;
103
104 static inline bool dst_cmp(const struct dsthash_ent *ent,
105                            const struct dsthash_dst *b)
106 {
107         return !memcmp(&ent->dst, b, sizeof(ent->dst));
108 }
109
110 static u_int32_t
111 hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
112 {
113         u_int32_t hash = jhash2((const u32 *)dst,
114                                 sizeof(*dst)/sizeof(u32),
115                                 ht->rnd);
116         /*
117          * Instead of returning hash % ht->cfg.size (implying a divide)
118          * we return the high 32 bits of (hash * ht->cfg.size), which gives
119          * results in [0, cfg.size-1] with the same hash distribution, but
120          * uses a multiply, which is less expensive than a divide.
121          */
122         return ((u64)hash * ht->cfg.size) >> 32;
123 }
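/*
 * Worked example of the multiply-shift bucket selection above (numbers are
 * illustrative only): with cfg.size = 1024,
 *
 *   hash = 0x00000000  ->  ((u64)0x00000000 * 1024) >> 32  =    0
 *   hash = 0x80000000  ->  ((u64)0x80000000 * 1024) >> 32  =  512
 *   hash = 0xFFFFFFFF  ->  ((u64)0xFFFFFFFF * 1024) >> 32  = 1023
 *
 * so the full 32-bit hash range maps uniformly onto buckets [0, 1023]
 * without a division.
 */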
124
125 static struct dsthash_ent *
126 dsthash_find(const struct xt_hashlimit_htable *ht,
127              const struct dsthash_dst *dst)
128 {
129         struct dsthash_ent *ent;
130         struct hlist_node *pos;
131         u_int32_t hash = hash_dst(ht, dst);
132
133         if (!hlist_empty(&ht->hash[hash])) {
134                 hlist_for_each_entry(ent, pos, &ht->hash[hash], node)
135                         if (dst_cmp(ent, dst))
136                                 return ent;
137         }
138         return NULL;
139 }
140
141 /* allocate dsthash_ent, initialize dst, put in htable and lock it */
142 static struct dsthash_ent *
143 dsthash_alloc_init(struct xt_hashlimit_htable *ht,
144                    const struct dsthash_dst *dst)
145 {
146         struct dsthash_ent *ent;
147
148         /* initialize hash with random val at the time we allocate
149          * the first hashtable entry */
150         if (!ht->rnd_initialized) {
151                 get_random_bytes(&ht->rnd, 4);
152                 ht->rnd_initialized = 1;
153         }
154
155         if (ht->cfg.max && ht->count >= ht->cfg.max) {
156                 /* FIXME: do something. question is what.. */
157                 if (net_ratelimit())
158                         printk(KERN_WARNING
159                                 "xt_hashlimit: max count of %u reached\n",
160                                 ht->cfg.max);
161                 return NULL;
162         }
163
164         ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
165         if (!ent) {
166                 if (net_ratelimit())
167                         printk(KERN_ERR
168                                 "xt_hashlimit: can't allocate dsthash_ent\n");
169                 return NULL;
170         }
171         memcpy(&ent->dst, dst, sizeof(ent->dst));
172
173         hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
174         ht->count++;
175         return ent;
176 }
177
178 static inline void
179 dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
180 {
181         hlist_del(&ent->node);
182         kmem_cache_free(hashlimit_cachep, ent);
183         ht->count--;
184 }
185 static void htable_gc(unsigned long htlong);
186
187 static int htable_create(struct xt_hashlimit_info *minfo, int family)
188 {
189         struct xt_hashlimit_htable *hinfo;
190         unsigned int size;
191         unsigned int i;
192
193         if (minfo->cfg.size)
194                 size = minfo->cfg.size;
195         else {
196                 size = ((num_physpages << PAGE_SHIFT) / 16384) /
197                        sizeof(struct list_head);
198                 if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
199                         size = 8192;
200                 if (size < 16)
201                         size = 16;
202         }
203         /* FIXME: don't use vmalloc() here or anywhere else -HW */
204         hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
205                         sizeof(struct list_head) * size);
206         if (!hinfo) {
207                 printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
208                 return -1;
209         }
210         minfo->hinfo = hinfo;
211
212         /* copy match config into hashtable config */
213         memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
214         hinfo->cfg.size = size;
215         if (!hinfo->cfg.max)
216                 hinfo->cfg.max = 8 * hinfo->cfg.size;
217         else if (hinfo->cfg.max < hinfo->cfg.size)
218                 hinfo->cfg.max = hinfo->cfg.size;
219
220         for (i = 0; i < hinfo->cfg.size; i++)
221                 INIT_HLIST_HEAD(&hinfo->hash[i]);
222
223         atomic_set(&hinfo->use, 1);
224         hinfo->count = 0;
225         hinfo->family = family;
226         hinfo->rnd_initialized = 0;
227         spin_lock_init(&hinfo->lock);
228         hinfo->pde = create_proc_entry(minfo->name, 0,
229                                        family == AF_INET ? hashlimit_procdir4 :
230                                                            hashlimit_procdir6);
231         if (!hinfo->pde) {
232                 vfree(hinfo);
233                 return -1;
234         }
235         hinfo->pde->proc_fops = &dl_file_ops;
236         hinfo->pde->data = hinfo;
237
238         setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
239         hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
240         add_timer(&hinfo->timer);
241
242         spin_lock_bh(&hashlimit_lock);
243         hlist_add_head(&hinfo->node, &hashlimit_htables);
244         spin_unlock_bh(&hashlimit_lock);
245
246         return 0;
247 }
248
249 static bool select_all(const struct xt_hashlimit_htable *ht,
250                        const struct dsthash_ent *he)
251 {
252         return 1;
253 }
254
255 static bool select_gc(const struct xt_hashlimit_htable *ht,
256                       const struct dsthash_ent *he)
257 {
258         return time_after_eq(jiffies, he->expires);
259 }
260
261 static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
262                         bool (*select)(const struct xt_hashlimit_htable *ht,
263                                       const struct dsthash_ent *he))
264 {
265         unsigned int i;
266
267         /* lock hash table and iterate over it */
268         spin_lock_bh(&ht->lock);
269         for (i = 0; i < ht->cfg.size; i++) {
270                 struct dsthash_ent *dh;
271                 struct hlist_node *pos, *n;
272                 hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
273                         if ((*select)(ht, dh))
274                                 dsthash_free(ht, dh);
275                 }
276         }
277         spin_unlock_bh(&ht->lock);
278 }
279
280 /* hash table garbage collector, run by timer */
281 static void htable_gc(unsigned long htlong)
282 {
283         struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;
284
285         htable_selective_cleanup(ht, select_gc);
286
287         /* re-add the timer accordingly */
288         ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
289         add_timer(&ht->timer);
290 }
291
292 static void htable_destroy(struct xt_hashlimit_htable *hinfo)
293 {
294         /* remove timer, if it is pending */
295         if (timer_pending(&hinfo->timer))
296                 del_timer(&hinfo->timer);
297
298         /* remove proc entry */
299         remove_proc_entry(hinfo->pde->name,
300                           hinfo->family == AF_INET ? hashlimit_procdir4 :
301                                                      hashlimit_procdir6);
302         htable_selective_cleanup(hinfo, select_all);
303         vfree(hinfo);
304 }
305
306 static struct xt_hashlimit_htable *htable_find_get(const char *name,
307                                                    int family)
308 {
309         struct xt_hashlimit_htable *hinfo;
310         struct hlist_node *pos;
311
312         spin_lock_bh(&hashlimit_lock);
313         hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
314                 if (!strcmp(name, hinfo->pde->name) &&
315                     hinfo->family == family) {
316                         atomic_inc(&hinfo->use);
317                         spin_unlock_bh(&hashlimit_lock);
318                         return hinfo;
319                 }
320         }
321         spin_unlock_bh(&hashlimit_lock);
322         return NULL;
323 }
324
325 static void htable_put(struct xt_hashlimit_htable *hinfo)
326 {
327         if (atomic_dec_and_test(&hinfo->use)) {
328                 spin_lock_bh(&hashlimit_lock);
329                 hlist_del(&hinfo->node);
330                 spin_unlock_bh(&hashlimit_lock);
331                 htable_destroy(hinfo);
332         }
333 }
334
335 /* The algorithm used is the Simple Token Bucket Filter (TBF)
336  * see net/sched/sch_tbf.c in the linux source tree
337  */
338
339 /* Rusty: This is my (non-mathematically-inclined) understanding of
340    this algorithm.  The `average rate' in jiffies becomes your initial
341    amount of credit `credit' and the most credit you can ever have
342    `credit_cap'.  The `peak rate' becomes the cost of passing the
343    test, `cost'.
344
345    `prev' tracks the last packet hit: you gain one credit per jiffy.
346    If your credit balance exceeds this, the extra credit is
347    discarded.  Every time the match passes, you lose `cost' credits;
348    if you don't have that many, the test fails.
349
350    See Alexey's formal explanation in net/sched/sch_tbf.c.
351
352    To get the maximum range, we multiply by this factor (ie. you get N
353    credits per jiffy).  We want to allow a rate as low as 1 per day
354    (slowest userspace tool allows), which means
355    CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
356 */
357 #define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
358
359 /* Repeated shift and or gives us all 1s, final shift and add 1 gives
360  * us the power of 2 below the theoretical max, so GCC simply does a
361  * shift. */
362 #define _POW2_BELOW2(x) ((x)|((x)>>1))
363 #define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
364 #define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
365 #define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
366 #define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
367 #define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
368
369 #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
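/*
 * Worked example of the two constants above (assuming HZ = 1000; other HZ
 * values scale accordingly):
 *
 *   MAX_CPJ           = 0xFFFFFFFF / (1000*60*60*24) = 49
 *   CREDITS_PER_JIFFY = POW2_BELOW32(49)             = 32
 *
 * i.e. 49 (0b110001) is first smeared into 0b111111 (63) by the repeated
 * shift-and-or, then the final ">> 1, + 1" yields 32, the largest power of
 * two not exceeding 49.  At HZ = 100 the same arithmetic gives
 * MAX_CPJ = 497 and CREDITS_PER_JIFFY = 256.
 */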
370
371 /* Precision saver. */
372 static inline u_int32_t
373 user2credits(u_int32_t user)
374 {
375         /* If multiplying would overflow... */
376         if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
377                 /* Divide first. */
378                 return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
379
380         return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
381 }
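/*
 * Worked example of the credit bookkeeping (illustrative; assumes HZ = 1000,
 * hence CREDITS_PER_JIFFY = 32, and XT_HASHLIMIT_SCALE = 10000 as defined in
 * xt_hashlimit.h): a rule of "5/sec, burst 10" is encoded by userspace as
 * cfg.avg = XT_HASHLIMIT_SCALE / 5 = 2000 (average seconds between packets,
 * scaled) and cfg.burst = 10, so
 *
 *   cost       = user2credits(2000)      = 2000 * 1000 * 32 / 10000 =  6400
 *   credit_cap = user2credits(2000 * 10)                            = 64000
 *
 * Each jiffy refills 32 credits (32000 per second), each matching packet
 * costs 6400, which works out to the intended 5 packets per second once the
 * initial burst of 10 packets' worth of credit has been spent.
 */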
382
383 static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
384 {
385         dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
386         if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
387                 dh->rateinfo.credit = dh->rateinfo.credit_cap;
388         dh->rateinfo.prev = now;
389 }
390
391 static int
392 hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
393                    struct dsthash_dst *dst,
394                    const struct sk_buff *skb, unsigned int protoff)
395 {
396         __be16 _ports[2], *ports;
397         u8 nexthdr;
398
399         memset(dst, 0, sizeof(*dst));
400
401         switch (hinfo->family) {
402         case AF_INET:
403                 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
404                         dst->addr.ip.dst = ip_hdr(skb)->daddr;
405                 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
406                         dst->addr.ip.src = ip_hdr(skb)->saddr;
407
408                 if (!(hinfo->cfg.mode &
409                       (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
410                         return 0;
411                 nexthdr = ip_hdr(skb)->protocol;
412                 break;
413 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
414         case AF_INET6:
415                 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
416                         memcpy(&dst->addr.ip6.dst, &ipv6_hdr(skb)->daddr,
417                                sizeof(dst->addr.ip6.dst));
418                 if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
419                         memcpy(&dst->addr.ip6.src, &ipv6_hdr(skb)->saddr,
420                                sizeof(dst->addr.ip6.src));
421
422                 if (!(hinfo->cfg.mode &
423                       (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
424                         return 0;
425                 nexthdr = ipv6_hdr(skb)->nexthdr;
426                 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
427                 if ((int)protoff < 0)
428                         return -1;
429                 break;
430 #endif
431         default:
432                 BUG();
433                 return 0;
434         }
435
436         switch (nexthdr) {
437         case IPPROTO_TCP:
438         case IPPROTO_UDP:
439         case IPPROTO_UDPLITE:
440         case IPPROTO_SCTP:
441         case IPPROTO_DCCP:
442                 ports = skb_header_pointer(skb, protoff, sizeof(_ports),
443                                            &_ports);
444                 break;
445         default:
446                 _ports[0] = _ports[1] = 0;
447                 ports = _ports;
448                 break;
449         }
450         if (!ports)
451                 return -1;
452         if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
453                 dst->src_port = ports[0];
454         if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
455                 dst->dst_port = ports[1];
456         return 0;
457 }
458
459 static bool
460 hashlimit_mt(const struct sk_buff *skb, const struct net_device *in,
461              const struct net_device *out, const struct xt_match *match,
462              const void *matchinfo, int offset, unsigned int protoff,
463              bool *hotdrop)
464 {
465         const struct xt_hashlimit_info *r =
466                 ((const struct xt_hashlimit_info *)matchinfo)->u.master;
467         struct xt_hashlimit_htable *hinfo = r->hinfo;
468         unsigned long now = jiffies;
469         struct dsthash_ent *dh;
470         struct dsthash_dst dst;
471
472         if (hashlimit_init_dst(hinfo, &dst, skb, protoff) < 0)
473                 goto hotdrop;
474
475         spin_lock_bh(&hinfo->lock);
476         dh = dsthash_find(hinfo, &dst);
477         if (!dh) {
478                 dh = dsthash_alloc_init(hinfo, &dst);
479                 if (!dh) {
480                         spin_unlock_bh(&hinfo->lock);
481                         goto hotdrop;
482                 }
483
484                 dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
485                 dh->rateinfo.prev = jiffies;
486                 dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
487                                                    hinfo->cfg.burst);
488                 dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
489                                                        hinfo->cfg.burst);
490                 dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
491         } else {
492                 /* update expiration timeout */
493                 dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
494                 rateinfo_recalc(dh, now);
495         }
496
497         if (dh->rateinfo.credit >= dh->rateinfo.cost) {
498                 /* We're underlimit. */
499                 dh->rateinfo.credit -= dh->rateinfo.cost;
500                 spin_unlock_bh(&hinfo->lock);
501                 return true;
502         }
503
504         spin_unlock_bh(&hinfo->lock);
505
506         /* default case: we're overlimit, thus don't match */
507         return false;
508
509 hotdrop:
510         *hotdrop = true;
511         return false;
512 }
513
514 static bool
515 hashlimit_mt_check(const char *tablename, const void *inf,
516                    const struct xt_match *match, void *matchinfo,
517                    unsigned int hook_mask)
518 {
519         struct xt_hashlimit_info *r = matchinfo;
520
521         /* Check for overflow. */
522         if (r->cfg.burst == 0 ||
523             user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
524                 printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
525                        r->cfg.avg, r->cfg.burst);
526                 return false;
527         }
528         if (r->cfg.mode == 0 ||
529             r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
530                            XT_HASHLIMIT_HASH_DIP |
531                            XT_HASHLIMIT_HASH_SIP |
532                            XT_HASHLIMIT_HASH_SPT))
533                 return false;
534         if (!r->cfg.gc_interval)
535                 return false;
536         if (!r->cfg.expire)
537                 return false;
538         if (r->name[sizeof(r->name) - 1] != '\0')
539                 return false;
540
541         /* This is the best we've got: We cannot release and re-grab lock,
542          * since checkentry() is called before x_tables.c grabs xt_mutex.
543          * We also cannot grab the hashtable spinlock, since htable_create will
544          * call vmalloc, and that can sleep.  And we cannot just re-search
545          * the list of htable's in htable_create(), since then we would
546          * create duplicate proc files. -HW */
547         mutex_lock(&hlimit_mutex);
548         r->hinfo = htable_find_get(r->name, match->family);
549         if (!r->hinfo && htable_create(r, match->family) != 0) {
550                 mutex_unlock(&hlimit_mutex);
551                 return false;
552         }
553         mutex_unlock(&hlimit_mutex);
554
555         /* Ugly hack: For SMP, we only want to use one set */
556         r->u.master = r;
557         return true;
558 }
559
560 static void
561 hashlimit_mt_destroy(const struct xt_match *match, void *matchinfo)
562 {
563         const struct xt_hashlimit_info *r = matchinfo;
564
565         htable_put(r->hinfo);
566 }
567
568 #ifdef CONFIG_COMPAT
569 struct compat_xt_hashlimit_info {
570         char name[IFNAMSIZ];
571         struct hashlimit_cfg cfg;
572         compat_uptr_t hinfo;
573         compat_uptr_t master;
574 };
575
576 static void hashlimit_mt_compat_from_user(void *dst, void *src)
577 {
578         int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
579
580         memcpy(dst, src, off);
581         memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
582 }
583
584 static int hashlimit_mt_compat_to_user(void __user *dst, void *src)
585 {
586         int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
587
588         return copy_to_user(dst, src, off) ? -EFAULT : 0;
589 }
590 #endif
591
592 static struct xt_match hashlimit_mt_reg[] __read_mostly = {
593         {
594                 .name           = "hashlimit",
595                 .family         = AF_INET,
596                 .match          = hashlimit_mt,
597                 .matchsize      = sizeof(struct xt_hashlimit_info),
598 #ifdef CONFIG_COMPAT
599                 .compatsize     = sizeof(struct compat_xt_hashlimit_info),
600                 .compat_from_user = hashlimit_mt_compat_from_user,
601                 .compat_to_user = hashlimit_mt_compat_to_user,
602 #endif
603                 .checkentry     = hashlimit_mt_check,
604                 .destroy        = hashlimit_mt_destroy,
605                 .me             = THIS_MODULE
606         },
607 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
608         {
609                 .name           = "hashlimit",
610                 .family         = AF_INET6,
611                 .match          = hashlimit_mt,
612                 .matchsize      = sizeof(struct xt_hashlimit_info),
613 #ifdef CONFIG_COMPAT
614                 .compatsize     = sizeof(struct compat_xt_hashlimit_info),
615                 .compat_from_user = hashlimit_mt_compat_from_user,
616                 .compat_to_user = hashlimit_mt_compat_to_user,
617 #endif
618                 .checkentry     = hashlimit_mt_check,
619                 .destroy        = hashlimit_mt_destroy,
620                 .me             = THIS_MODULE
621         },
622 #endif
623 };
624
625 /* PROC stuff */
626 static void *dl_seq_start(struct seq_file *s, loff_t *pos)
627         __acquires(htable->lock)
628 {
629         struct proc_dir_entry *pde = s->private;
630         struct xt_hashlimit_htable *htable = pde->data;
631         unsigned int *bucket;
632
633         spin_lock_bh(&htable->lock);
634         if (*pos >= htable->cfg.size)
635                 return NULL;
636
637         bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
638         if (!bucket)
639                 return ERR_PTR(-ENOMEM);
640
641         *bucket = *pos;
642         return bucket;
643 }
644
645 static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
646 {
647         struct proc_dir_entry *pde = s->private;
648         struct xt_hashlimit_htable *htable = pde->data;
649         unsigned int *bucket = (unsigned int *)v;
650
651         *pos = ++(*bucket);
652         if (*pos >= htable->cfg.size) {
653                 kfree(v);
654                 return NULL;
655         }
656         return bucket;
657 }
658
659 static void dl_seq_stop(struct seq_file *s, void *v)
660         __releases(htable->lock)
661 {
662         struct proc_dir_entry *pde = s->private;
663         struct xt_hashlimit_htable *htable = pde->data;
664         unsigned int *bucket = (unsigned int *)v;
665
666         kfree(bucket);
667         spin_unlock_bh(&htable->lock);
668 }
669
670 static int dl_seq_real_show(struct dsthash_ent *ent, int family,
671                                    struct seq_file *s)
672 {
673         /* recalculate to show accurate numbers */
674         rateinfo_recalc(ent, jiffies);
675
676         switch (family) {
677         case AF_INET:
678                 return seq_printf(s, "%ld %u.%u.%u.%u:%u->"
679                                      "%u.%u.%u.%u:%u %u %u %u\n",
680                                  (long)(ent->expires - jiffies)/HZ,
681                                  NIPQUAD(ent->dst.addr.ip.src),
682                                  ntohs(ent->dst.src_port),
683                                  NIPQUAD(ent->dst.addr.ip.dst),
684                                  ntohs(ent->dst.dst_port),
685                                  ent->rateinfo.credit, ent->rateinfo.credit_cap,
686                                  ent->rateinfo.cost);
687 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
688         case AF_INET6:
689                 return seq_printf(s, "%ld " NIP6_FMT ":%u->"
690                                      NIP6_FMT ":%u %u %u %u\n",
691                                  (long)(ent->expires - jiffies)/HZ,
692                                  NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.src),
693                                  ntohs(ent->dst.src_port),
694                                  NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.dst),
695                                  ntohs(ent->dst.dst_port),
696                                  ent->rateinfo.credit, ent->rateinfo.credit_cap,
697                                  ent->rateinfo.cost);
698 #endif
699         default:
700                 BUG();
701                 return 0;
702         }
703 }
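/*
 * The resulting /proc output is one line per active hash entry; for the
 * IPv4 case a line looks roughly like (values purely illustrative):
 *
 *   58 192.168.0.7:0->0.0.0.0:0 64000 64000 6400
 *
 * i.e. seconds until expiry, src:sport->dst:dport, then the current credit,
 * credit cap and per-packet cost of that entry.
 */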
704
705 static int dl_seq_show(struct seq_file *s, void *v)
706 {
707         struct proc_dir_entry *pde = s->private;
708         struct xt_hashlimit_htable *htable = pde->data;
709         unsigned int *bucket = (unsigned int *)v;
710         struct dsthash_ent *ent;
711         struct hlist_node *pos;
712
713         if (!hlist_empty(&htable->hash[*bucket])) {
714                 hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
715                         if (dl_seq_real_show(ent, htable->family, s))
716                                 return 1;
717         }
718         return 0;
719 }
720
721 static const struct seq_operations dl_seq_ops = {
722         .start = dl_seq_start,
723         .next  = dl_seq_next,
724         .stop  = dl_seq_stop,
725         .show  = dl_seq_show
726 };
727
728 static int dl_proc_open(struct inode *inode, struct file *file)
729 {
730         int ret = seq_open(file, &dl_seq_ops);
731
732         if (!ret) {
733                 struct seq_file *sf = file->private_data;
734                 sf->private = PDE(inode);
735         }
736         return ret;
737 }
738
739 static const struct file_operations dl_file_ops = {
740         .owner   = THIS_MODULE,
741         .open    = dl_proc_open,
742         .read    = seq_read,
743         .llseek  = seq_lseek,
744         .release = seq_release
745 };
746
747 static int __init hashlimit_mt_init(void)
748 {
749         int err;
750
751         err = xt_register_matches(hashlimit_mt_reg,
752               ARRAY_SIZE(hashlimit_mt_reg));
753         if (err < 0)
754                 goto err1;
755
756         err = -ENOMEM;
757         hashlimit_cachep = kmem_cache_create("xt_hashlimit",
758                                             sizeof(struct dsthash_ent), 0, 0,
759                                             NULL);
760         if (!hashlimit_cachep) {
761                 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
762                 goto err2;
763         }
764         hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", init_net.proc_net);
765         if (!hashlimit_procdir4) {
766                 printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
767                                 "entry\n");
768                 goto err3;
769         }
770         err = 0;
771 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
772         hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", init_net.proc_net);
773         if (!hashlimit_procdir6) {
774                 printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
775                                 "entry\n");
776                 err = -ENOMEM;
777         }
778 #endif
779         if (!err)
780                 return 0;
781         remove_proc_entry("ipt_hashlimit", init_net.proc_net);
782 err3:
783         kmem_cache_destroy(hashlimit_cachep);
784 err2:
785         xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
786 err1:
787         return err;
788
789 }
790
791 static void __exit hashlimit_mt_exit(void)
792 {
793         remove_proc_entry("ipt_hashlimit", init_net.proc_net);
794 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
795         remove_proc_entry("ip6t_hashlimit", init_net.proc_net);
796 #endif
797         kmem_cache_destroy(hashlimit_cachep);
798         xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
799 }
800
801 module_init(hashlimit_mt_init);
802 module_exit(hashlimit_mt_exit);