inet fragments: fix race between inet_frag_find and inet_frag_secret_rebuild
net/ipv6/reassembly.c
/*
 *      IPv6 fragment reassembly
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      $Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *      Based on: net/ipv4/ip_fragment.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Fixes:
 *      Andi Kleen      Make it work with multiple hosts.
 *                      More RFC compliance.
 *
 *      Horst von Brand Add missing #include <linux/string.h>
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
 *      Patrick McHardy         LRU queue of frag heads for evictor.
 *      Mitsuru KANDA @USAGI    Register inet6_protocol{}.
 *      David Stevens and
 *      YOSHIFUJI,H. @USAGI     Always remove fragment header to
 *                              calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

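/*
 * Per-fragment reassembly state is stashed in the skb control block
 * (skb->cb), layered after the generic IPv6 parameter area; only the
 * fragment's byte offset within the original datagram needs to be
 * remembered.  The struct must fit within sizeof(skb->cb).
 */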
struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb*)((skb)->cb))


/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct inet_frag_queue  q;

        __be32                  id;             /* fragment id          */
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        int                     iif;
        unsigned int            csum;
        __u16                   nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
        return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
        return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);

/*
 * Callers must not use the hash value outside ip6_frags.lock, as doing
 * so could race with ip6_frags.rnd being recalculated by the secret
 * rebuild timer.
 */
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
                               struct in6_addr *daddr)
{
        u32 a, b, c;

        a = (__force u32)saddr->s6_addr32[0];
        b = (__force u32)saddr->s6_addr32[1];
        c = (__force u32)saddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += ip6_frags.rnd;
        __jhash_mix(a, b, c);

        a += (__force u32)saddr->s6_addr32[3];
        b += (__force u32)daddr->s6_addr32[0];
        c += (__force u32)daddr->s6_addr32[1];
        __jhash_mix(a, b, c);

        a += (__force u32)daddr->s6_addr32[2];
        b += (__force u32)daddr->s6_addr32[3];
        c += (__force u32)id;
        __jhash_mix(a, b, c);

        return c & (INETFRAGS_HASHSZ - 1);
}
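
/*
 * The above is an open-coded Jenkins hash (the __jhash_mix() primitive
 * from <linux/jhash.h>) over nine 32-bit words: the eight words of the
 * source and destination addresses plus the fragment id, seeded with
 * the per-table secret ip6_frags.rnd and folded into the hash table
 * size.  Because rnd changes at every secret rebuild, the result is
 * only meaningful while ip6_frags.lock is held.
 */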

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
        struct frag_queue *fq;

        fq = container_of(q, struct frag_queue, q);
        return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq;
        struct ip6_create_arg *arg = a;

        fq = container_of(q, struct frag_queue, q);
        return (fq->id == arg->id &&
                        ipv6_addr_equal(&fq->saddr, arg->src) &&
                        ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct netns_frags *nf,
                struct sk_buff *skb, int *work)
{
        if (work)
                *work -= skb->truesize;
        atomic_sub(skb->truesize, &nf->mem);
        kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
        struct ip6_create_arg *arg = a;

        fq->id = arg->id;
        ipv6_addr_copy(&fq->saddr, arg->src);
        ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
        inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately, because the
 * caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
        int evicted;

        evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
        if (evicted)
                IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq;
        struct net_device *dev = NULL;
        struct net *net;

        fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        fq_kill(fq);

        net = container_of(fq->q.net, struct net, ipv6.frags);
        dev = dev_get_by_index(net, fq->iif);
        if (!dev)
                goto out;

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();

        /* Don't send error if the first segment did not arrive. */
        if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
                goto out;

        /*
           Send the ICMP error from the device on which the LAST
           segment arrived.  Do not use a cached device pointer
           directly; the device might already have disappeared.
         */
        fq->q.fragments->dev = dev;
        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
        if (dev)
                dev_put(dev);
        spin_unlock(&fq->q.lock);
        fq_put(fq);
}

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
        struct inet6_dev *idev)
{
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
        unsigned int hash;

        arg.id = id;
        arg.src = src;
        arg.dst = dst;

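        /*
         * The read lock must cover both the hash computation and the
         * lookup: inet_frag_secret_rebuild() changes ip6_frags.rnd and
         * rehashes every chain under the write lock, so a hash computed
         * outside the lock could point at a stale bucket -- the race
         * this patch closes.
         */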
        read_lock(&ip6_frags.lock);
        hash = ip6qhashfn(id, src, dst);

        q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
        if (q == NULL)
                goto oom;

        return container_of(q, struct frag_queue, q);

oom:
        IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
        return NULL;
}

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                           struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int offset, end;

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto err;

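        /*
         * frag_off carries the 13-bit fragment offset (in 8-byte units)
         * in its high bits; masking off the low three bits (two reserved
         * plus M) therefore yields the offset in bytes directly.  "end"
         * is one past this fragment's last byte: payload_len minus the
         * extension headers between the fixed IPv6 header and the end of
         * the fragment header.
         */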
        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((u8 *)&fhdr->frag_off -
                                   skb_network_header(skb)));
                return -1;
        }

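        /*
         * A hardware CHECKSUM_COMPLETE sum covered everything from the
         * network header on; subtract the partial sum of the headers up
         * to and including the fragment header, so skb->csum covers only
         * the fragment payload that will be spliced into the datagram.
         */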
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
                                                  0));
        }

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return -1;
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;

        if (pskb_trim_rcsum(skb, end - offset))
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* The old fragment is completely overridden by
                         * the new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->q.fragments = next;

                        fq->q.meat -= free_it->len;
                        frag_kfree_skb(fq->q.net, free_it, NULL);
                }
        }

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                fq->iif = dev->ifindex;
                skb->dev = NULL;
        }
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
        atomic_add(skb->truesize, &fq->q.net->mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        }

        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len)
                return ip6_frag_reasm(fq, prev, dev);

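        /*
         * Not complete yet: mark the queue most-recently-used by moving
         * it to the tail of the per-netns LRU list, so that the memory
         * evictor (which reaps from the head) frees the oldest queues
         * first.
         */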
        write_lock(&ip6_frags.lock);
        list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
        write_unlock(&ip6_frags.lock);
        return -1;

err:
        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

/*
 *      Check if this packet is complete.
 *
 *      Returns -1 on failure for any reason, and 1 when the datagram was
 *      successfully reassembled, with IP6CB(head)->nhoff pointing at the
 *      current nexthdr field in the reassembled frame.
 *
 *      It is called with the fq spinlock held, and the caller must have
 *      checked that the queue is eligible for reassembly, i.e. it is not
 *      COMPLETE, the last and the first frames arrived and all the bits
 *      are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev)
{
        struct sk_buff *fp, *head = fq->q.fragments;
        int    payload_len;
        unsigned int nhoff;

        fq_kill(fq);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                kfree_skb(fq->q.fragments);
                fq->q.fragments = head;
        }
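        /*
         * The clone/morph dance above leaves a clone of the just-received
         * skb in its old slot in the chain, while skb_morph() transfers
         * the old head's identity (ownership, destructor) onto the skb we
         * just received, which then becomes the new head -- presumably so
         * the reassembled datagram is delivered in the context of the
         * fragment that completed it.
         */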

        BUG_TRAP(head != NULL);
        BUG_TRAP(FRAG6_CB(head)->offset == 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = ((head->data - skb_network_header(head)) -
                       sizeof(struct ipv6hdr) + fq->q.len -
                       sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is itself fragmented, split it into two
         * chunks: the first holding the linear data and paged part, and
         * the second holding only the fragment list. */
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &fq->q.net->mem);
        }

        /* We have to remove the fragment header from the datagram and
         * relocate the remaining headers in order to calculate the ICV
         * correctly. */
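        /*
         * In-place deletion of the fragment header: first copy its
         * nexthdr byte back into the preceding header's nexthdr slot,
         * then slide all headers in front of it 8 bytes towards the
         * payload with memmove(), overwriting the fragment header, and
         * bump the mac/network header offsets to match.
         */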
        nhoff = fq->nhoffset;
        skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
        head->mac_header += sizeof(struct frag_hdr);
        head->network_header += sizeof(struct frag_hdr);

        skb_shinfo(head)->frag_list = head->next;
        skb_reset_transport_header(head);
        skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &fq->q.net->mem);

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &fq->q.net->mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
        IP6CB(head)->nhoff = nhoff;

        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
                head->csum = csum_partial(skb_network_header(head),
                                          skb_network_header_len(head),
                                          head->csum);

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
        return 1;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct net *net;

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

        /* A zero payload_len means a jumbo payload option (RFC 2675),
         * which must not be combined with a fragment header. */
        if (hdr->payload_len == 0) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  skb_network_header_len(skb));
                return -1;
        }
        if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 sizeof(struct frag_hdr)))) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  skb_network_header_len(skb));
                return -1;
        }

        hdr = ipv6_hdr(skb);
        fhdr = (struct frag_hdr *)skb_transport_header(skb);

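        /*
         * frag_off layout (network order): 13-bit offset in the high
         * bits, two reserved bits, and the M ("more fragments") bit in
         * bit 0.  htons(0xFFF9) masks offset|M, so a zero result means
         * offset == 0 and M == 0: a fragment header on an unfragmented
         * packet, which is simply skipped over.
         */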
        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->transport_header += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                return 1;
        }

        net = dev_net(skb->dev);
        if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
                ip6_evictor(net, ip6_dst_idev(skb->dst));

        if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
                          ip6_dst_idev(skb->dst))) != NULL) {
                int ret;

                spin_lock(&fq->q.lock);

                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

                spin_unlock(&fq->q.lock);
                fq_put(fq);
                return ret;
        }

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

static struct inet6_protocol frag_protocol =
{
        .handler        =       ipv6_frag_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ctl_table[] = {
        {
                .ctl_name       = NET_IPV6_IP6FRAG_HIGH_THRESH,
                .procname       = "ip6frag_high_thresh",
                .data           = &init_net.ipv6.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
        {
                .ctl_name       = NET_IPV6_IP6FRAG_LOW_THRESH,
                .procname       = "ip6frag_low_thresh",
                .data           = &init_net.ipv6.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
        {
                .ctl_name       = NET_IPV6_IP6FRAG_TIME,
                .procname       = "ip6frag_time",
                .data           = &init_net.ipv6.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        {
                .ctl_name       = NET_IPV6_IP6FRAG_SECRET_INTERVAL,
                .procname       = "ip6frag_secret_interval",
                .data           = &ip6_frags.secret_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies
        },
        { }
};

static int ip6_frags_sysctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = ip6_frags_ctl_table;
        if (net != &init_net) {
                table = kmemdup(table, sizeof(ip6_frags_ctl_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &net->ipv6.frags.high_thresh;
                table[1].data = &net->ipv6.frags.low_thresh;
                table[2].data = &net->ipv6.frags.timeout;
                table[3].mode &= ~0222;
        }

        hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
        if (hdr == NULL)
                goto err_reg;

        net->ipv6.sysctl.frags_hdr = hdr;
        return 0;

err_reg:
        if (net != &init_net)
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void ip6_frags_sysctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
        kfree(table);
}
#else
static inline int ip6_frags_sysctl_register(struct net *net)
{
        return 0;
}

static inline void ip6_frags_sysctl_unregister(struct net *net)
{
}
#endif

static int ipv6_frags_init_net(struct net *net)
{
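        /*
         * high_thresh/low_thresh bound the total truesize of queued
         * fragments per namespace: once mem crosses high_thresh, the
         * evictor reaps LRU queues until it drops back below low_thresh.
         */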
        net->ipv6.frags.high_thresh = 256 * 1024;
        net->ipv6.frags.low_thresh = 192 * 1024;
        net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

        inet_frags_init_net(&net->ipv6.frags);

        return ip6_frags_sysctl_register(net);
}

static void ipv6_frags_exit_net(struct net *net)
{
        ip6_frags_sysctl_unregister(net);
        inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
        .init = ipv6_frags_init_net,
        .exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
        int ret;

        ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        if (ret)
                goto out;

        register_pernet_subsys(&ip6_frags_ops);

        ip6_frags.hashfn = ip6_hashfn;
        ip6_frags.constructor = ip6_frag_init;
        ip6_frags.destructor = NULL;
        ip6_frags.skb_free = NULL;
        ip6_frags.qsize = sizeof(struct frag_queue);
        ip6_frags.match = ip6_frag_match;
        ip6_frags.frag_expire = ip6_frag_expire;
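        /*
         * Re-randomize the hash secret every ten minutes:
         * inet_frags_init() arms a timer that runs
         * inet_frag_secret_rebuild() at this interval, picking a new
         * rnd and rehashing all chains to frustrate hash-collision
         * attacks.
         */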
        ip6_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&ip6_frags);
out:
        return ret;
}

void ipv6_frag_exit(void)
{
        inet_frags_fini(&ip6_frags);
        unregister_pernet_subsys(&ip6_frags_ops);
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}