[IPV6]: Replace sk_buff ** with sk_buff * in input handlers
net/ipv6/reassembly.c
/*
 *      IPv6 fragment reassembly
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      $Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *      Based on: net/ipv4/ip_fragment.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Fixes:
 *      Andi Kleen      Make it work with multiple hosts.
 *                      More RFC compliance.
 *
 *      Horst von Brand Add missing #include <linux/string.h>
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
 *      Patrick McHardy         LRU queue of frag heads for evictor.
 *      Mitsuru KANDA @USAGI    Register inet6_protocol{}.
 *      David Stevens and
 *      YOSHIFUJI,H. @USAGI     Always remove fragment header to
 *                              calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb*)((skb)->cb))
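/* FRAG6_CB() overlays ip6frag_skb_cb on skb->cb: the inet6 control
 * block stays in place and the fragment's data offset is stashed in
 * the spare bytes behind it while the skb sits on the queue.
 */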


/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct inet_frag_queue  q;

        __be32                  id;             /* fragment id          */
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        int                     iif;
        unsigned int            csum;
        __u16                   nhoffset;
};

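/* Reassembly memory limits: once the memory charged to queued
 * fragments climbs past high_thresh bytes, ip6_evictor() drops the
 * oldest queues until usage falls back under low_thresh.
 */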
struct inet_frags_ctl ip6_frags_ctl __read_mostly = {
        .high_thresh     = 256 * 1024,
        .low_thresh      = 192 * 1024,
        .timeout         = IPV6_FRAG_TIMEOUT,
        .secret_interval = 10 * 60 * HZ,
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(void)
{
        return ip6_frags.nqueues;
}

int ip6_frag_mem(void)
{
        return atomic_read(&ip6_frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);
/*
 * Callers should be careful not to use the hash value outside
 * ip6_frags.lock, as doing so could race with the hash secret
 * (ip6_frags.rnd) being recalculated.
 */
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
                               struct in6_addr *daddr)
{
        u32 a, b, c;

        a = (__force u32)saddr->s6_addr32[0];
        b = (__force u32)saddr->s6_addr32[1];
        c = (__force u32)saddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += ip6_frags.rnd;
        __jhash_mix(a, b, c);

        a += (__force u32)saddr->s6_addr32[3];
        b += (__force u32)daddr->s6_addr32[0];
        c += (__force u32)daddr->s6_addr32[1];
        __jhash_mix(a, b, c);

        a += (__force u32)daddr->s6_addr32[2];
        b += (__force u32)daddr->s6_addr32[3];
        c += (__force u32)id;
        __jhash_mix(a, b, c);

        return c & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
        struct frag_queue *fq;

        fq = container_of(q, struct frag_queue, q);
        return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
}

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
{
        if (work)
                *work -= skb->truesize;
        atomic_sub(skb->truesize, &ip6_frags.mem);
        kfree_skb(skb);
}

static void ip6_frag_free(struct inet_frag_queue *fq)
{
        kfree(container_of(fq, struct frag_queue, q));
}

static inline struct frag_queue *frag_alloc_queue(void)
{
        struct frag_queue *fq = kzalloc(sizeof(struct frag_queue), GFP_ATOMIC);

        if (!fq)
                return NULL;
        atomic_add(sizeof(struct frag_queue), &ip6_frags.mem);
        return fq;
}

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
        inet_frag_put(&fq->q, &ip6_frags);
}
/* Kill fq entry. It is not destroyed immediately, because the caller
 * (and possibly others) still holds a reference to it.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct inet6_dev *idev)
{
        int evicted;

        evicted = inet_frag_evictor(&ip6_frags);
        if (evicted)
                IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq = (struct frag_queue *) data;
        struct net_device *dev = NULL;

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & COMPLETE)
                goto out;

        fq_kill(fq);

        dev = dev_get_by_index(&init_net, fq->iif);
        if (!dev)
                goto out;

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();

        /* Don't send error if the first segment did not arrive. */
        if (!(fq->q.last_in & FIRST_IN) || !fq->q.fragments)
                goto out;

        /*
         * But use the device on which the LAST ARRIVED segment was
         * received as the source. And do not use the fq->dev pointer
         * directly; the device might already have disappeared.
         */
        fq->q.fragments->dev = dev;
        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
        if (dev)
                dev_put(dev);
        spin_unlock(&fq->q.lock);
        fq_put(fq);
}

/* Creation primitives. */


static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
{
        struct frag_queue *fq;
        unsigned int hash;
#ifdef CONFIG_SMP
        struct hlist_node *n;
#endif

        write_lock(&ip6_frags.lock);
        hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
#ifdef CONFIG_SMP
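        /* On SMP another CPU may have created and interned an
         * identical queue while we were outside the lock; if so,
         * drop fq_in and take a reference on the existing entry.
         */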
        hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
                if (fq->id == fq_in->id &&
                    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
                    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
                        atomic_inc(&fq->q.refcnt);
                        write_unlock(&ip6_frags.lock);
                        fq_in->q.last_in |= COMPLETE;
                        fq_put(fq_in);
                        return fq;
                }
        }
#endif
        fq = fq_in;

        if (!mod_timer(&fq->q.timer, jiffies + ip6_frags_ctl.timeout))
                atomic_inc(&fq->q.refcnt);

        atomic_inc(&fq->q.refcnt);
        hlist_add_head(&fq->q.list, &ip6_frags.hash[hash]);
        INIT_LIST_HEAD(&fq->q.lru_list);
        list_add_tail(&fq->q.lru_list, &ip6_frags.lru_list);
        ip6_frags.nqueues++;
        write_unlock(&ip6_frags.lock);
        return fq;
}


static struct frag_queue *
ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst,
                struct inet6_dev *idev)
{
        struct frag_queue *fq;

        if ((fq = frag_alloc_queue()) == NULL)
                goto oom;

        fq->id = id;
        ipv6_addr_copy(&fq->saddr, src);
        ipv6_addr_copy(&fq->daddr, dst);

        init_timer(&fq->q.timer);
        fq->q.timer.function = ip6_frag_expire;
        fq->q.timer.data = (long) fq;
        spin_lock_init(&fq->q.lock);
        atomic_set(&fq->q.refcnt, 1);

        return ip6_frag_intern(fq);

oom:
        IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
        return NULL;
}

static __inline__ struct frag_queue *
fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
        struct inet6_dev *idev)
{
        struct frag_queue *fq;
        struct hlist_node *n;
        unsigned int hash;

        read_lock(&ip6_frags.lock);
        hash = ip6qhashfn(id, src, dst);
        hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
                if (fq->id == id &&
                    ipv6_addr_equal(src, &fq->saddr) &&
                    ipv6_addr_equal(dst, &fq->daddr)) {
                        atomic_inc(&fq->q.refcnt);
                        read_unlock(&ip6_frags.lock);
                        return fq;
                }
        }
        read_unlock(&ip6_frags.lock);

        return ip6_frag_create(id, src, dst, idev);
}


static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                           struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int offset, end;

        if (fq->q.last_in & COMPLETE)
                goto err;

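        /* "end" is this fragment's last byte + 1 within the original
         * packet: offset plus the fragment's data length, i.e. the
         * IPv6 payload minus the extension headers (fragment header
         * included) that precede the data.  For example, frag_off =
         * 0x00b1 (offset 176, MF set) with payload_len = 1448 and the
         * fragment header right behind the IPv6 header gives
         * end = 176 + (1448 - 8) = 1616.
         */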
        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((u8 *)&fhdr->frag_off -
                                   skb_network_header(skb)));
                return -1;
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
                                                  0));
        }

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= LAST_IN;
                fq->q.len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return -1;
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;

        if (pskb_trim_rcsum(skb, end - offset))
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
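        /* E.g. if prev covers [0, 1240) and this fragment claims
         * [1232, 2672), the 8 overlapping bytes are pulled off this
         * fragment's head so it effectively becomes [1240, 2672).
         */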
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
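        /* E.g. if this fragment now covers [1240, 2672) and next
         * starts at 2664, the first 8 bytes of next are eaten; a
         * next that lies entirely below 2672 is dropped outright.
         */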
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* Old fragment is completely overridden by
                         * the new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->q.fragments = next;

                        fq->q.meat -= free_it->len;
                        frag_kfree_skb(free_it, NULL);
                }
        }

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                fq->iif = dev->ifindex;
                skb->dev = NULL;
        }
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
        atomic_add(skb->truesize, &ip6_frags.mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->q.last_in |= FIRST_IN;
        }

        if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
                return ip6_frag_reasm(fq, prev, dev);

        write_lock(&ip6_frags.lock);
        list_move_tail(&fq->q.lru_list, &ip6_frags.lru_list);
        write_unlock(&ip6_frags.lock);
        return -1;

err:
        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}
/*
 *      Check if this packet is complete.
 *      Returns 1 on success (the reassembled datagram has been
 *      collapsed into the skb the caller handed in) and -1 on failure.
 *
 *      It is called with locked fq, and caller must check that
 *      queue is eligible for reassembly i.e. it is not COMPLETE,
 *      the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev)
{
        struct sk_buff *fp, *head = fq->q.fragments;
        int    payload_len;
        unsigned int nhoff;

        fq_kill(fq);

        /* Make the one we just received the head. */
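        /* prev->next is the skb that has just arrived.  Clone it into
         * its old slot in the chain, then skb_morph() it over the old
         * head so the caller's skb pointer ends up naming the
         * reassembled datagram; this is what allows the input handler
         * to take a plain sk_buff * instead of sk_buff **.
         */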
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                kfree_skb(fq->q.fragments);
                fq->q.fragments = head;
        }

        BUG_TRAP(head != NULL);
        BUG_TRAP(FRAG6_CB(head)->offset == 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = ((head->data - skb_network_header(head)) -
                       sizeof(struct ipv6hdr) + fq->q.len -
                       sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
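        /* The new zero-length clone takes over head's own frag_list,
         * freeing head's frag_list slot to chain the remaining IPv6
         * fragments below; head keeps its linear data and paged frags.
         */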
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &ip6_frags.mem);
        }

        /* We have to remove fragment header from datagram and to relocate
         * header in order to calculate ICV correctly. */
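        /* The next-header byte saved in the fragment header is copied
         * back over the preceding header's nexthdr field, then the
         * memmove() slides everything in front of the fragment payload
         * forward by 8 bytes, overwriting the fragment header; the mac
         * and network header offsets move with it.
         */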
        nhoff = fq->nhoffset;
        skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
        head->mac_header += sizeof(struct frag_hdr);
        head->network_header += sizeof(struct frag_hdr);

        skb_shinfo(head)->frag_list = head->next;
        skb_reset_transport_header(head);
        skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &ip6_frags.mem);

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &ip6_frags.mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
        IP6CB(head)->nhoff = nhoff;

        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
                head->csum = csum_partial(skb_network_header(head),
                                          skb_network_header_len(head),
                                          head->csum);

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
        return 1;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr = ipv6_hdr(skb);

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

        /* Jumbo payload inhibits frag. header */
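        /* A payload_len of 0 indicates a Jumbo Payload option
         * (RFC 2675), which must not be combined with fragmentation.
         */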
        if (hdr->payload_len == 0) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  skb_network_header_len(skb));
                return -1;
        }
        if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 sizeof(struct frag_hdr)))) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  skb_network_header_len(skb));
                return -1;
        }

        hdr = ipv6_hdr(skb);
        fhdr = (struct frag_hdr *)skb_transport_header(skb);

        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
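                /* frag_off packs the 13-bit fragment offset (mask
                 * 0xFFF8) and the MF flag (0x0001); 0xFFF9 therefore
                 * tests "offset == 0 and no more fragments", ignoring
                 * the two reserved bits.
                 */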
                skb->transport_header += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                return 1;
        }

        if (atomic_read(&ip6_frags.mem) > ip6_frags_ctl.high_thresh)
                ip6_evictor(ip6_dst_idev(skb->dst));

        if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
                          ip6_dst_idev(skb->dst))) != NULL) {
                int ret;

                spin_lock(&fq->q.lock);

                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

                spin_unlock(&fq->q.lock);
                fq_put(fq);
                return ret;
        }

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

static struct inet6_protocol frag_protocol =
{
        .handler        =       ipv6_frag_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

void __init ipv6_frag_init(void)
{
        if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
                printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");

        ip6_frags.ctl = &ip6_frags_ctl;
        ip6_frags.hashfn = ip6_hashfn;
        ip6_frags.destructor = ip6_frag_free;
        ip6_frags.skb_free = NULL;
        ip6_frags.qsize = sizeof(struct frag_queue);
        inet_frags_init(&ip6_frags);
}