[TCP]: MD5 Signature Option (RFC2385) support.
net/ipv6/tcp_ipv6.c ([safe/jmp/linux-2.6])
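For context, a minimal userspace sketch of how the TCP_MD5SIG socket option added by this patch might be used on an IPv6 socket (handled below by tcp_v6_parse_md5_keys()); the helper name and error handling are illustrative only, and it assumes the struct tcp_md5sig definition exported via linux/tcp.h:

/* Hypothetical userspace helper: install (or, with keylen == 0, delete)
 * an RFC 2385 key for segments exchanged with @peer on socket @fd.
 */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/tcp.h>          /* struct tcp_md5sig, TCP_MD5SIG */

static int set_tcp_md5_key(int fd, const struct in6_addr *peer,
                           const void *key, unsigned int keylen)
{
        struct tcp_md5sig md5;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;

        if (keylen > TCP_MD5SIG_MAXKEYLEN)
                return -1;

        memset(&md5, 0, sizeof(md5));
        sin6->sin6_family = AF_INET6;
        sin6->sin6_addr = *peer;
        md5.tcpm_keylen = keylen;       /* zero length deletes an existing key */
        memcpy(md5.tcpm_key, key, keylen);

        return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}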
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation 
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>     
7  *
8  *      $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9  *
10  *      Based on: 
11  *      linux/net/ipv4/tcp.c
12  *      linux/net/ipv4/tcp_input.c
13  *      linux/net/ipv4/tcp_output.c
14  *
15  *      Fixes:
16  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
17  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
18  *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
19  *                                      to a single port at the same time.
20  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
21  *
22  *      This program is free software; you can redistribute it and/or
23  *      modify it under the terms of the GNU General Public License
24  *      as published by the Free Software Foundation; either version
25  *      2 of the License, or (at your option) any later version.
26  */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
35 #include <linux/in.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
60 #include <net/snmp.h>
61 #include <net/dsfield.h>
62 #include <net/timewait_sock.h>
63
64 #include <asm/uaccess.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <linux/crypto.h>
70 #include <linux/scatterlist.h>
71
72 /* Socket used for sending RSTs and ACKs */
73 static struct socket *tcp6_socket;
74
75 static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void     tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
77 static void     tcp_v6_send_check(struct sock *sk, int len, 
78                                   struct sk_buff *skb);
79
80 static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
81
82 static struct inet_connection_sock_af_ops ipv6_mapped;
83 static struct inet_connection_sock_af_ops ipv6_specific;
84 static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
85 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
86
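/* Bind the socket to a local port, using the generic connection-socket
 * port allocator with the IPv6-aware bind-conflict check.
 */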
87 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
88 {
89         return inet_csk_get_port(&tcp_hashinfo, sk, snum,
90                                  inet6_csk_bind_conflict);
91 }
92
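/* Put the socket into the TCP hash tables.  Sockets using the v4-mapped
 * operations are hashed by the IPv4 code; native IPv6 sockets go into
 * the IPv6 hash.
 */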
93 static void tcp_v6_hash(struct sock *sk)
94 {
95         if (sk->sk_state != TCP_CLOSE) {
96                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
97                         tcp_prot.hash(sk);
98                         return;
99                 }
100                 local_bh_disable();
101                 __inet6_hash(&tcp_hashinfo, sk);
102                 local_bh_enable();
103         }
104 }
105
106 static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
107                                    struct in6_addr *saddr, 
108                                    struct in6_addr *daddr, 
109                                    unsigned long base)
110 {
111         return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
112 }
113
114 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
115 {
116         return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
117                                             skb->nh.ipv6h->saddr.s6_addr32,
118                                             skb->h.th->dest,
119                                             skb->h.th->source);
120 }
121
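/* Active open: validate the destination, handle flow labels and scope
 * ids, divert v4-mapped destinations to tcp_v4_connect(), otherwise
 * route the flow, choose source address and port, and send the SYN.
 */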
122 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 
123                           int addr_len)
124 {
125         struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
126         struct inet_sock *inet = inet_sk(sk);
127         struct inet_connection_sock *icsk = inet_csk(sk);
128         struct ipv6_pinfo *np = inet6_sk(sk);
129         struct tcp_sock *tp = tcp_sk(sk);
130         struct in6_addr *saddr = NULL, *final_p = NULL, final;
131         struct flowi fl;
132         struct dst_entry *dst;
133         int addr_type;
134         int err;
135
136         if (addr_len < SIN6_LEN_RFC2133) 
137                 return -EINVAL;
138
139         if (usin->sin6_family != AF_INET6) 
140                 return(-EAFNOSUPPORT);
141
142         memset(&fl, 0, sizeof(fl));
143
144         if (np->sndflow) {
145                 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
146                 IP6_ECN_flow_init(fl.fl6_flowlabel);
147                 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
148                         struct ip6_flowlabel *flowlabel;
149                         flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
150                         if (flowlabel == NULL)
151                                 return -EINVAL;
152                         ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
153                         fl6_sock_release(flowlabel);
154                 }
155         }
156
157         /*
158          *      connect() to INADDR_ANY means loopback (BSD'ism).
159          */
160         
161         if(ipv6_addr_any(&usin->sin6_addr))
162                 usin->sin6_addr.s6_addr[15] = 0x1; 
163
164         addr_type = ipv6_addr_type(&usin->sin6_addr);
165
166         if(addr_type & IPV6_ADDR_MULTICAST)
167                 return -ENETUNREACH;
168
169         if (addr_type&IPV6_ADDR_LINKLOCAL) {
170                 if (addr_len >= sizeof(struct sockaddr_in6) &&
171                     usin->sin6_scope_id) {
172                         /* If interface is set while binding, indices
173                          * must coincide.
174                          */
175                         if (sk->sk_bound_dev_if &&
176                             sk->sk_bound_dev_if != usin->sin6_scope_id)
177                                 return -EINVAL;
178
179                         sk->sk_bound_dev_if = usin->sin6_scope_id;
180                 }
181
182                 /* Connect to link-local address requires an interface */
183                 if (!sk->sk_bound_dev_if)
184                         return -EINVAL;
185         }
186
187         if (tp->rx_opt.ts_recent_stamp &&
188             !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
189                 tp->rx_opt.ts_recent = 0;
190                 tp->rx_opt.ts_recent_stamp = 0;
191                 tp->write_seq = 0;
192         }
193
194         ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
195         np->flow_label = fl.fl6_flowlabel;
196
197         /*
198          *      TCP over IPv4
199          */
200
201         if (addr_type == IPV6_ADDR_MAPPED) {
202                 u32 exthdrlen = icsk->icsk_ext_hdr_len;
203                 struct sockaddr_in sin;
204
205                 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
206
207                 if (__ipv6_only_sock(sk))
208                         return -ENETUNREACH;
209
210                 sin.sin_family = AF_INET;
211                 sin.sin_port = usin->sin6_port;
212                 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
213
214                 icsk->icsk_af_ops = &ipv6_mapped;
215                 sk->sk_backlog_rcv = tcp_v4_do_rcv;
216 #ifdef CONFIG_TCP_MD5SIG
217                 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
218 #endif
219
220                 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
221
222                 if (err) {
223                         icsk->icsk_ext_hdr_len = exthdrlen;
224                         icsk->icsk_af_ops = &ipv6_specific;
225                         sk->sk_backlog_rcv = tcp_v6_do_rcv;
226 #ifdef CONFIG_TCP_MD5SIG
227                         tp->af_specific = &tcp_sock_ipv6_specific;
228 #endif
229                         goto failure;
230                 } else {
231                         ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
232                                       inet->saddr);
233                         ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
234                                       inet->rcv_saddr);
235                 }
236
237                 return err;
238         }
239
240         if (!ipv6_addr_any(&np->rcv_saddr))
241                 saddr = &np->rcv_saddr;
242
243         fl.proto = IPPROTO_TCP;
244         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
245         ipv6_addr_copy(&fl.fl6_src,
246                        (saddr ? saddr : &np->saddr));
247         fl.oif = sk->sk_bound_dev_if;
248         fl.fl_ip_dport = usin->sin6_port;
249         fl.fl_ip_sport = inet->sport;
250
251         if (np->opt && np->opt->srcrt) {
252                 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
253                 ipv6_addr_copy(&final, &fl.fl6_dst);
254                 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
255                 final_p = &final;
256         }
257
258         security_sk_classify_flow(sk, &fl);
259
260         err = ip6_dst_lookup(sk, &dst, &fl);
261         if (err)
262                 goto failure;
263         if (final_p)
264                 ipv6_addr_copy(&fl.fl6_dst, final_p);
265
266         if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
267                 goto failure;
268
269         if (saddr == NULL) {
270                 saddr = &fl.fl6_src;
271                 ipv6_addr_copy(&np->rcv_saddr, saddr);
272         }
273
274         /* set the source address */
275         ipv6_addr_copy(&np->saddr, saddr);
276         inet->rcv_saddr = LOOPBACK4_IPV6;
277
278         sk->sk_gso_type = SKB_GSO_TCPV6;
279         __ip6_dst_store(sk, dst, NULL, NULL);
280
281         icsk->icsk_ext_hdr_len = 0;
282         if (np->opt)
283                 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
284                                           np->opt->opt_nflen);
285
286         tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
287
288         inet->dport = usin->sin6_port;
289
290         tcp_set_state(sk, TCP_SYN_SENT);
291         err = inet6_hash_connect(&tcp_death_row, sk);
292         if (err)
293                 goto late_failure;
294
295         if (!tp->write_seq)
296                 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
297                                                              np->daddr.s6_addr32,
298                                                              inet->sport,
299                                                              inet->dport);
300
301         err = tcp_connect(sk);
302         if (err)
303                 goto late_failure;
304
305         return 0;
306
307 late_failure:
308         tcp_set_state(sk, TCP_CLOSE);
309         __sk_dst_reset(sk);
310 failure:
311         inet->dport = 0;
312         sk->sk_route_caps = 0;
313         return err;
314 }
315
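/* ICMPv6 error handler: find the socket the error refers to, adjust the
 * path MTU for ICMPV6_PKT_TOOBIG, drop a matching request_sock, or
 * report the error to the owner according to the socket state.
 */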
316 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
317                 int type, int code, int offset, __be32 info)
318 {
319         struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
320         const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
321         struct ipv6_pinfo *np;
322         struct sock *sk;
323         int err;
324         struct tcp_sock *tp; 
325         __u32 seq;
326
327         sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
328                           th->source, skb->dev->ifindex);
329
330         if (sk == NULL) {
331                 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
332                 return;
333         }
334
335         if (sk->sk_state == TCP_TIME_WAIT) {
336                 inet_twsk_put(inet_twsk(sk));
337                 return;
338         }
339
340         bh_lock_sock(sk);
341         if (sock_owned_by_user(sk))
342                 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
343
344         if (sk->sk_state == TCP_CLOSE)
345                 goto out;
346
347         tp = tcp_sk(sk);
348         seq = ntohl(th->seq); 
349         if (sk->sk_state != TCP_LISTEN &&
350             !between(seq, tp->snd_una, tp->snd_nxt)) {
351                 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
352                 goto out;
353         }
354
355         np = inet6_sk(sk);
356
357         if (type == ICMPV6_PKT_TOOBIG) {
358                 struct dst_entry *dst = NULL;
359
360                 if (sock_owned_by_user(sk))
361                         goto out;
362                 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
363                         goto out;
364
365                 /* icmp should have updated the destination cache entry */
366                 dst = __sk_dst_check(sk, np->dst_cookie);
367
368                 if (dst == NULL) {
369                         struct inet_sock *inet = inet_sk(sk);
370                         struct flowi fl;
371
372                         /* BUGGG_FUTURE: Again, it is not clear how
373                            to handle rthdr case. Ignore this complexity
374                            for now.
375                          */
376                         memset(&fl, 0, sizeof(fl));
377                         fl.proto = IPPROTO_TCP;
378                         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
379                         ipv6_addr_copy(&fl.fl6_src, &np->saddr);
380                         fl.oif = sk->sk_bound_dev_if;
381                         fl.fl_ip_dport = inet->dport;
382                         fl.fl_ip_sport = inet->sport;
383                         security_skb_classify_flow(skb, &fl);
384
385                         if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
386                                 sk->sk_err_soft = -err;
387                                 goto out;
388                         }
389
390                         if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
391                                 sk->sk_err_soft = -err;
392                                 goto out;
393                         }
394
395                 } else
396                         dst_hold(dst);
397
398                 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
399                         tcp_sync_mss(sk, dst_mtu(dst));
400                         tcp_simple_retransmit(sk);
401                 } /* else let the usual retransmit timer handle it */
402                 dst_release(dst);
403                 goto out;
404         }
405
406         icmpv6_err_convert(type, code, &err);
407
408         /* Might be for a request_sock */
409         switch (sk->sk_state) {
410                 struct request_sock *req, **prev;
411         case TCP_LISTEN:
412                 if (sock_owned_by_user(sk))
413                         goto out;
414
415                 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
416                                            &hdr->saddr, inet6_iif(skb));
417                 if (!req)
418                         goto out;
419
420                 /* ICMPs are not backlogged, hence we cannot get
421                  * an established socket here.
422                  */
423                 BUG_TRAP(req->sk == NULL);
424
425                 if (seq != tcp_rsk(req)->snt_isn) {
426                         NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
427                         goto out;
428                 }
429
430                 inet_csk_reqsk_queue_drop(sk, req, prev);
431                 goto out;
432
433         case TCP_SYN_SENT:
434         case TCP_SYN_RECV:  /* Cannot happen.
435                                It can, if SYNs are crossed. --ANK */
436                 if (!sock_owned_by_user(sk)) {
437                         sk->sk_err = err;
438                         sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */
439
440                         tcp_done(sk);
441                 } else
442                         sk->sk_err_soft = err;
443                 goto out;
444         }
445
446         if (!sock_owned_by_user(sk) && np->recverr) {
447                 sk->sk_err = err;
448                 sk->sk_error_report(sk);
449         } else
450                 sk->sk_err_soft = err;
451
452 out:
453         bh_unlock_sock(sk);
454         sock_put(sk);
455 }
456
457
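/* Build and send a SYN|ACK for the given connection request, taking any
 * source routing header from the request's stored packet options into
 * account when routing the reply.
 */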
458 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
459                               struct dst_entry *dst)
460 {
461         struct inet6_request_sock *treq = inet6_rsk(req);
462         struct ipv6_pinfo *np = inet6_sk(sk);
463         struct sk_buff * skb;
464         struct ipv6_txoptions *opt = NULL;
465         struct in6_addr * final_p = NULL, final;
466         struct flowi fl;
467         int err = -1;
468
469         memset(&fl, 0, sizeof(fl));
470         fl.proto = IPPROTO_TCP;
471         ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
472         ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
473         fl.fl6_flowlabel = 0;
474         fl.oif = treq->iif;
475         fl.fl_ip_dport = inet_rsk(req)->rmt_port;
476         fl.fl_ip_sport = inet_sk(sk)->sport;
477         security_req_classify_flow(req, &fl);
478
479         if (dst == NULL) {
480                 opt = np->opt;
481                 if (opt == NULL &&
482                     np->rxopt.bits.osrcrt == 2 &&
483                     treq->pktopts) {
484                         struct sk_buff *pktopts = treq->pktopts;
485                         struct inet6_skb_parm *rxopt = IP6CB(pktopts);
486                         if (rxopt->srcrt)
487                                 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
488                 }
489
490                 if (opt && opt->srcrt) {
491                         struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
492                         ipv6_addr_copy(&final, &fl.fl6_dst);
493                         ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
494                         final_p = &final;
495                 }
496
497                 err = ip6_dst_lookup(sk, &dst, &fl);
498                 if (err)
499                         goto done;
500                 if (final_p)
501                         ipv6_addr_copy(&fl.fl6_dst, final_p);
502                 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
503                         goto done;
504         }
505
506         skb = tcp_make_synack(sk, dst, req);
507         if (skb) {
508                 struct tcphdr *th = skb->h.th;
509
510                 th->check = tcp_v6_check(th, skb->len,
511                                          &treq->loc_addr, &treq->rmt_addr,
512                                          csum_partial((char *)th, skb->len, skb->csum));
513
514                 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
515                 err = ip6_xmit(sk, skb, &fl, opt, 0);
516                 err = net_xmit_eval(err);
517         }
518
519 done:
520         if (opt && opt != np->opt)
521                 sock_kfree_s(sk, opt, opt->tot_len);
522         dst_release(dst);
523         return err;
524 }
525
526 static void tcp_v6_reqsk_destructor(struct request_sock *req)
527 {
528         if (inet6_rsk(req)->pktopts)
529                 kfree_skb(inet6_rsk(req)->pktopts);
530 }
531
532 #ifdef CONFIG_TCP_MD5SIG
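/* Find the MD5 signature key configured for peer address @addr, or
 * return NULL if this socket has no key for that peer.
 */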
533 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
534                                                    struct in6_addr *addr)
535 {
536         struct tcp_sock *tp = tcp_sk(sk);
537         int i;
538
539         BUG_ON(tp == NULL);
540
541         if (!tp->md5sig_info || !tp->md5sig_info->entries6)
542                 return NULL;
543
544         for (i = 0; i < tp->md5sig_info->entries6; i++) {
545                 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
546                         return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
547         }
548         return NULL;
549 }
550
551 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
552                                                 struct sock *addr_sk)
553 {
554         return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
555 }
556
557 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
558                                                       struct request_sock *req)
559 {
560         return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
561 }
562
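/* Add an MD5 key for @peer, or replace the existing one.  The function
 * takes ownership of @newkey and frees it on allocation failure.
 */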
563 static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
564                              char *newkey, u8 newkeylen)
565 {
566         /* Add key to the list */
567         struct tcp6_md5sig_key *key;
568         struct tcp_sock *tp = tcp_sk(sk);
569         struct tcp6_md5sig_key *keys;
570
571         key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
572         if (key) {
573                 /* modify existing entry - just update that one */
574                 kfree(key->key);
575                 key->key = newkey;
576                 key->keylen = newkeylen;
577         } else {
578                 /* reallocate new list if current one is full. */
579                 if (!tp->md5sig_info) {
580                         tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
581                         if (!tp->md5sig_info) {
582                                 kfree(newkey);
583                                 return -ENOMEM;
584                         }
585                 }
586                 tcp_alloc_md5sig_pool();
587                 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
588                         keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
589                                        (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
590
591                         if (!keys) {
592                                 tcp_free_md5sig_pool();
593                                 kfree(newkey);
594                                 return -ENOMEM;
595                         }
596
597                         if (tp->md5sig_info->entries6)
598                                 memmove(keys, tp->md5sig_info->keys6,
599                                         (sizeof (tp->md5sig_info->keys6[0]) *
600                                          tp->md5sig_info->entries6));
601
602                         kfree(tp->md5sig_info->keys6);
603                         tp->md5sig_info->keys6 = keys;
604                         tp->md5sig_info->alloced6++;
605                 }
606
607                 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
608                                peer);
609                 tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
610                 tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
611
612                 tp->md5sig_info->entries6++;
613         }
614         return 0;
615 }
616
617 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
618                                u8 *newkey, __u8 newkeylen)
619 {
620         return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
621                                  newkey, newkeylen);
622 }
623
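/* Remove the MD5 key configured for @peer; the key array is freed when
 * the last IPv6 entry disappears.
 */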
624 static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
625 {
626         struct tcp_sock *tp = tcp_sk(sk);
627         int i;
628
629         for (i = 0; i < tp->md5sig_info->entries6; i++) {
630                 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
631                         /* Free the key */
632                         kfree(tp->md5sig_info->keys6[i].key);
633                         tp->md5sig_info->entries6--;
634
635                         if (tp->md5sig_info->entries6 == 0) {
636                                 kfree(tp->md5sig_info->keys6);
637                                 tp->md5sig_info->keys6 = NULL;
638
639                                 tcp_free_md5sig_pool();
640
641                                 return 0;
642                         } else {
643                                 /* shrink the database */
644                                 if (tp->md5sig_info->entries6 != i)
645                                         memmove(&tp->md5sig_info->keys6[i],
646                                                 &tp->md5sig_info->keys6[i+1],
647                                                 (tp->md5sig_info->entries6 - i)
648                                                 * sizeof (tp->md5sig_info->keys6[0]));
649                         }
                            tcp_free_md5sig_pool();
                            return 0;
650                 }
651         }
652         return -ENOENT;
653 }
654
655 static void tcp_v6_clear_md5_list (struct sock *sk)
656 {
657         struct tcp_sock *tp = tcp_sk(sk);
658         int i;
659
660         if (tp->md5sig_info->entries6) {
661                 for (i = 0; i < tp->md5sig_info->entries6; i++)
662                         kfree(tp->md5sig_info->keys6[i].key);
663                 tp->md5sig_info->entries6 = 0;
664                 tcp_free_md5sig_pool();
665         }
666
667         kfree(tp->md5sig_info->keys6);
668         tp->md5sig_info->keys6 = NULL;
669         tp->md5sig_info->alloced6 = 0;
670
671         if (tp->md5sig_info->entries4) {
672                 for (i = 0; i < tp->md5sig_info->entries4; i++)
673                         kfree(tp->md5sig_info->keys4[i].key);
674                 tp->md5sig_info->entries4 = 0;
675                 tcp_free_md5sig_pool();
676         }
677
678         kfree(tp->md5sig_info->keys4);
679         tp->md5sig_info->keys4 = NULL;
680         tp->md5sig_info->alloced4 = 0;
681 }
682
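/* TCP_MD5SIG setsockopt() handler: copy the request from user space and
 * add or delete the key, forwarding v4-mapped peers to the IPv4 code.
 */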
683 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
684                                   int optlen)
685 {
686         struct tcp_md5sig cmd;
687         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
688         u8 *newkey;
689
690         if (optlen < sizeof(cmd))
691                 return -EINVAL;
692
693         if (copy_from_user(&cmd, optval, sizeof(cmd)))
694                 return -EFAULT;
695
696         if (sin6->sin6_family != AF_INET6)
697                 return -EINVAL;
698
699         if (!cmd.tcpm_keylen) {
700                 if (!tcp_sk(sk)->md5sig_info)
701                         return -ENOENT;
702                 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
703                         return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
704                 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
705         }
706
707         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
708                 return -EINVAL;
709
710         if (!tcp_sk(sk)->md5sig_info) {
711                 struct tcp_sock *tp = tcp_sk(sk);
712                 struct tcp_md5sig_info *p;
713
714                 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
715                 if (!p)
716                         return -ENOMEM;
717
718                 tp->md5sig_info = p;
719         }
720
721         newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL);
722         if (!newkey)
723                 return -ENOMEM;
724         memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen);
725         if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
726                 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
727                                          newkey, cmd.tcpm_keylen);
728         }
729         return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
730 }
731
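/* Compute the RFC 2385 MD5 digest over the IPv6 pseudo-header, the TCP
 * header (checksum zeroed, options excluded), the payload and the
 * shared key, and write the 16-byte result to @md5_hash.
 */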
732 static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
733                                    struct in6_addr *saddr,
734                                    struct in6_addr *daddr,
735                                    struct tcphdr *th, int protocol,
736                                    int tcplen)
737 {
738         struct scatterlist sg[4];
739         __u16 data_len;
740         int block = 0;
741         __u16 cksum;
742         struct tcp_md5sig_pool *hp;
743         struct tcp6_pseudohdr *bp;
744         struct hash_desc *desc;
745         int err;
746         unsigned int nbytes = 0;
747
748         hp = tcp_get_md5sig_pool();
749         if (!hp) {
750                 printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
751                 goto clear_hash_noput;
752         }
753         bp = &hp->md5_blk.ip6;
754         desc = &hp->md5_desc;
755
756         /* 1. TCP pseudo-header (RFC2460) */
757         ipv6_addr_copy(&bp->saddr, saddr);
758         ipv6_addr_copy(&bp->daddr, daddr);
759         bp->len = htonl(tcplen);
760         bp->protocol = htonl(protocol);
761
762         sg_set_buf(&sg[block++], bp, sizeof(*bp));
763         nbytes += sizeof(*bp);
764
765         /* 2. TCP header, excluding options */
766         cksum = th->check;
767         th->check = 0;
768         sg_set_buf(&sg[block++], th, sizeof(*th));
769         nbytes += sizeof(*th);
770
771         /* 3. TCP segment data (if any) */
772         data_len = tcplen - (th->doff << 2);
773         if (data_len > 0) {
774                 u8 *data = (u8 *)th + (th->doff << 2);
775                 sg_set_buf(&sg[block++], data, data_len);
776                 nbytes += data_len;
777         }
778
779         /* 4. shared key */
780         sg_set_buf(&sg[block++], key->key, key->keylen);
781         nbytes += key->keylen;
782
783         /* Now store the hash into the packet */
784         err = crypto_hash_init(desc);
785         if (err) {
786                 printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
787                 goto clear_hash;
788         }
789         err = crypto_hash_update(desc, sg, nbytes);
790         if (err) {
791                 printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
792                 goto clear_hash;
793         }
794         err = crypto_hash_final(desc, md5_hash);
795         if (err) {
796                 printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
797                 goto clear_hash;
798         }
799
800         /* Reset header, and free up the crypto */
801         tcp_put_md5sig_pool();
802         th->check = cksum;
803 out:
804         return 0;
805 clear_hash:
806         tcp_put_md5sig_pool();
807 clear_hash_noput:
808         memset(md5_hash, 0, 16);
809         goto out;
810 }
811
812 static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
813                                 struct sock *sk,
814                                 struct dst_entry *dst,
815                                 struct request_sock *req,
816                                 struct tcphdr *th, int protocol,
817                                 int tcplen)
818 {
819         struct in6_addr *saddr, *daddr;
820
821         if (sk) {
822                 saddr = &inet6_sk(sk)->saddr;
823                 daddr = &inet6_sk(sk)->daddr;
824         } else {
825                 saddr = &inet6_rsk(req)->loc_addr;
826                 daddr = &inet6_rsk(req)->rmt_addr;
827         }
828         return tcp_v6_do_calc_md5_hash(md5_hash, key,
829                                        saddr, daddr,
830                                        th, protocol, tcplen);
831 }
832
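/* Check the MD5 signature of an incoming segment against the key (if
 * any) configured for the sender.  Returns non-zero when the segment
 * should be dropped: signature missing, unexpected, or wrong.
 */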
833 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
834 {
835         __u8 *hash_location = NULL;
836         struct tcp_md5sig_key *hash_expected;
837         struct ipv6hdr *ip6h = skb->nh.ipv6h;
838         struct tcphdr *th = skb->h.th;
839         int length = (th->doff << 2) - sizeof (*th);
840         int genhash;
841         u8 *ptr;
842         u8 newhash[16];
843
844         hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
845
846         /* If the TCP option is too short, we can short cut */
847         if (length < TCPOLEN_MD5SIG)
848                 return hash_expected ? 1 : 0;
849
850         /* parse options */
851         ptr = (u8*)(th + 1);
852         while (length > 0) {
853                 int opcode = *ptr++;
854                 int opsize;
855
856                 switch(opcode) {
857                 case TCPOPT_EOL:
858                         goto done_opts;
859                 case TCPOPT_NOP:
860                         length--;
861                         continue;
862                 default:
863                         opsize = *ptr++;
864                         if (opsize < 2 || opsize > length)
865                                 goto done_opts;
866                         if (opcode == TCPOPT_MD5SIG) {
867                                 hash_location = ptr;
868                                 goto done_opts;
869                         }
870                 }
871                 ptr += opsize - 2;
872                 length -= opsize;
873         }
874
875 done_opts:
876         /* do we have a hash as expected? */
877         if (!hash_expected) {
878                 if (!hash_location)
879                         return 0;
880                 if (net_ratelimit()) {
881                         printk(KERN_INFO "MD5 Hash NOT expected but found "
882                                "(" NIP6_FMT ", %u)->"
883                                "(" NIP6_FMT ", %u)\n",
884                                NIP6(ip6h->saddr), ntohs(th->source),
885                                NIP6(ip6h->daddr), ntohs(th->dest));
886                 }
887                 return 1;
888         }
889
890         if (!hash_location) {
891                 if (net_ratelimit()) {
892                         printk(KERN_INFO "MD5 Hash expected but NOT found "
893                                "(" NIP6_FMT ", %u)->"
894                                "(" NIP6_FMT ", %u)\n",
895                                NIP6(ip6h->saddr), ntohs(th->source),
896                                NIP6(ip6h->daddr), ntohs(th->dest));
897                 }
898                 return 1;
899         }
900
901         /* check the signature */
902         genhash = tcp_v6_do_calc_md5_hash(newhash,
903                                           hash_expected,
904                                           &ip6h->saddr, &ip6h->daddr,
905                                           th, sk->sk_protocol,
906                                           skb->len);
907         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
908                 if (net_ratelimit()) {
909                         printk(KERN_INFO "MD5 Hash %s for "
910                                "(" NIP6_FMT ", %u)->"
911                                "(" NIP6_FMT ", %u)\n",
912                                genhash ? "failed" : "mismatch",
913                                NIP6(ip6h->saddr), ntohs(th->source),
914                                NIP6(ip6h->daddr), ntohs(th->dest));
915                 }
916                 return 1;
917         }
918         return 0;
919 }
920 #endif
921
922 static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
923         .family         =       AF_INET6,
924         .obj_size       =       sizeof(struct tcp6_request_sock),
925         .rtx_syn_ack    =       tcp_v6_send_synack,
926         .send_ack       =       tcp_v6_reqsk_send_ack,
927         .destructor     =       tcp_v6_reqsk_destructor,
928         .send_reset     =       tcp_v6_send_reset
929 };
930
931 struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
932 #ifdef CONFIG_TCP_MD5SIG
933         .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
934 #endif
935 };
936
937 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
938         .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
939         .twsk_unique    = tcp_twsk_unique,
940         .twsk_destructor= tcp_twsk_destructor,
941 };
942
943 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
944 {
945         struct ipv6_pinfo *np = inet6_sk(sk);
946         struct tcphdr *th = skb->h.th;
947
948         if (skb->ip_summed == CHECKSUM_PARTIAL) {
949                 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
950                 skb->csum = offsetof(struct tcphdr, check);
951         } else {
952                 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 
953                                             csum_partial((char *)th, th->doff<<2, 
954                                                          skb->csum));
955         }
956 }
957
958 static int tcp_v6_gso_send_check(struct sk_buff *skb)
959 {
960         struct ipv6hdr *ipv6h;
961         struct tcphdr *th;
962
963         if (!pskb_may_pull(skb, sizeof(*th)))
964                 return -EINVAL;
965
966         ipv6h = skb->nh.ipv6h;
967         th = skb->h.th;
968
969         th->check = 0;
970         th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
971                                      IPPROTO_TCP, 0);
972         skb->csum = offsetof(struct tcphdr, check);
973         skb->ip_summed = CHECKSUM_PARTIAL;
974         return 0;
975 }
976
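/* Send a RST in reply to @skb.  @sk may be NULL; when a socket is given
 * and an MD5 key is found for it, the RST carries a signature.
 */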
977 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
978 {
979         struct tcphdr *th = skb->h.th, *t1; 
980         struct sk_buff *buff;
981         struct flowi fl;
982         int tot_len = sizeof(*th);
983 #ifdef CONFIG_TCP_MD5SIG
984         struct tcp_md5sig_key *key;
985 #endif
986
987         if (th->rst)
988                 return;
989
990         if (!ipv6_unicast_destination(skb))
991                 return; 
992
993 #ifdef CONFIG_TCP_MD5SIG
994         if (sk)
995                 key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
996         else
997                 key = NULL;
998
999         if (key)
1000                 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1001 #endif
1002
1003         /*
1004          * We need to grab some memory, and put together an RST,
1005          * and then put it into the queue to be sent.
1006          */
1007
1008         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1009                          GFP_ATOMIC);
1010         if (buff == NULL) 
1011                 return;
1012
1013         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1014
1015         t1 = (struct tcphdr *) skb_push(buff, tot_len);
1016
1017         /* Swap the send and the receive. */
1018         memset(t1, 0, sizeof(*t1));
1019         t1->dest = th->source;
1020         t1->source = th->dest;
1021         t1->doff = tot_len / 4;
1022         t1->rst = 1;
1023   
1024         if(th->ack) {
1025                 t1->seq = th->ack_seq;
1026         } else {
1027                 t1->ack = 1;
1028                 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
1029                                     + skb->len - (th->doff<<2));
1030         }
1031
1032 #ifdef CONFIG_TCP_MD5SIG
1033         if (key) {
1034                 u32 *opt = (u32*)(t1 + 1);
1035                 opt[0] = htonl((TCPOPT_NOP << 24) |
1036                                (TCPOPT_NOP << 16) |
1037                                (TCPOPT_MD5SIG << 8) |
1038                                TCPOLEN_MD5SIG);
1039                 tcp_v6_do_calc_md5_hash((__u8*)&opt[1],
1040                                         key,
1041                                         &skb->nh.ipv6h->daddr,
1042                                         &skb->nh.ipv6h->saddr,
1043                                         t1, IPPROTO_TCP,
1044                                         tot_len);
1045         }
1046 #endif
1047
1048         buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
1049
1050         memset(&fl, 0, sizeof(fl));
1051         ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
1052         ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
1053
1054         t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1055                                     sizeof(*t1), IPPROTO_TCP,
1056                                     buff->csum);
1057
1058         fl.proto = IPPROTO_TCP;
1059         fl.oif = inet6_iif(skb);
1060         fl.fl_ip_dport = t1->dest;
1061         fl.fl_ip_sport = t1->source;
1062         security_skb_classify_flow(skb, &fl);
1063
1064         /* sk = NULL, but it is safe for now. RST socket required. */
1065         if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1066
1067                 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1068                         ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1069                         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1070                         TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1071                         return;
1072                 }
1073         }
1074
1075         kfree_skb(buff);
1076 }
1077
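/* Send a bare ACK (used for timewait sockets and connection requests),
 * optionally carrying a timestamp option and an MD5 signature.
 */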
1078 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1079                             struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1080 {
1081         struct tcphdr *th = skb->h.th, *t1;
1082         struct sk_buff *buff;
1083         struct flowi fl;
1084         int tot_len = sizeof(struct tcphdr);
1085         u32 *topt;
1086 #ifdef CONFIG_TCP_MD5SIG
1087         struct tcp_md5sig_key *key;
1088         struct tcp_md5sig_key tw_key;
1089 #endif
1090
1091 #ifdef CONFIG_TCP_MD5SIG
1092         if (!tw && skb->sk) {
1093                 key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
1094         } else if (tw && tw->tw_md5_keylen) {
1095                 tw_key.key = tw->tw_md5_key;
1096                 tw_key.keylen = tw->tw_md5_keylen;
1097                 key = &tw_key;
1098         } else {
1099                 key = NULL;
1100         }
1101 #endif
1102
1103         if (ts)
1104                 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1105 #ifdef CONFIG_TCP_MD5SIG
1106         if (key)
1107                 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1108 #endif
1109
1110         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1111                          GFP_ATOMIC);
1112         if (buff == NULL)
1113                 return;
1114
1115         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1116
1117         t1 = (struct tcphdr *) skb_push(buff,tot_len);
1118
1119         /* Swap the send and the receive. */
1120         memset(t1, 0, sizeof(*t1));
1121         t1->dest = th->source;
1122         t1->source = th->dest;
1123         t1->doff = tot_len/4;
1124         t1->seq = htonl(seq);
1125         t1->ack_seq = htonl(ack);
1126         t1->ack = 1;
1127         t1->window = htons(win);
1128
1129         topt = (u32*)(t1 + 1);
1130         
1131         if (ts) {
1132                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1133                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1134                 *topt++ = htonl(tcp_time_stamp);
1135                 *topt = htonl(ts);
1136         }
1137
1138 #ifdef CONFIG_TCP_MD5SIG
1139         if (key) {
1140                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1141                                 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1142                 tcp_v6_do_calc_md5_hash((__u8 *)topt,
1143                                         key,
1144                                         &skb->nh.ipv6h->daddr,
1145                                         &skb->nh.ipv6h->saddr,
1146                                         t1, IPPROTO_TCP,
1147                                         tot_len);
1148         }
1149 #endif
1150
1151         buff->csum = csum_partial((char *)t1, tot_len, 0);
1152
1153         memset(&fl, 0, sizeof(fl));
1154         ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
1155         ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
1156
1157         t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1158                                     tot_len, IPPROTO_TCP,
1159                                     buff->csum);
1160
1161         fl.proto = IPPROTO_TCP;
1162         fl.oif = inet6_iif(skb);
1163         fl.fl_ip_dport = t1->dest;
1164         fl.fl_ip_sport = t1->source;
1165         security_skb_classify_flow(skb, &fl);
1166
1167         if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1168                 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1169                         ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1170                         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1171                         return;
1172                 }
1173         }
1174
1175         kfree_skb(buff);
1176 }
1177
1178 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1179 {
1180         struct inet_timewait_sock *tw = inet_twsk(sk);
1181         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1182
1183         tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1184                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1185                         tcptw->tw_ts_recent);
1186
1187         inet_twsk_put(tw);
1188 }
1189
1190 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1191 {
1192         tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1193 }
1194
1195
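/* For a segment arriving on a listening socket, look for a matching
 * pending connection request or an established socket for the same
 * flow.
 */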
1196 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1197 {
1198         struct request_sock *req, **prev;
1199         const struct tcphdr *th = skb->h.th;
1200         struct sock *nsk;
1201
1202         /* Find possible connection requests. */
1203         req = inet6_csk_search_req(sk, &prev, th->source,
1204                                    &skb->nh.ipv6h->saddr,
1205                                    &skb->nh.ipv6h->daddr, inet6_iif(skb));
1206         if (req)
1207                 return tcp_check_req(sk, skb, req, prev);
1208
1209         nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
1210                                          th->source, &skb->nh.ipv6h->daddr,
1211                                          ntohs(th->dest), inet6_iif(skb));
1212
1213         if (nsk) {
1214                 if (nsk->sk_state != TCP_TIME_WAIT) {
1215                         bh_lock_sock(nsk);
1216                         return nsk;
1217                 }
1218                 inet_twsk_put(inet_twsk(nsk));
1219                 return NULL;
1220         }
1221
1222 #if 0 /*def CONFIG_SYN_COOKIES*/
1223         if (!th->rst && !th->syn && th->ack)
1224                 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
1225 #endif
1226         return sk;
1227 }
1228
1229 /* FIXME: this is substantially similar to the ipv4 code.
1230  * Can some kind of merge be done? -- erics
1231  */
1232 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1233 {
1234         struct inet6_request_sock *treq;
1235         struct ipv6_pinfo *np = inet6_sk(sk);
1236         struct tcp_options_received tmp_opt;
1237         struct tcp_sock *tp = tcp_sk(sk);
1238         struct request_sock *req = NULL;
1239         __u32 isn = TCP_SKB_CB(skb)->when;
1240
1241         if (skb->protocol == htons(ETH_P_IP))
1242                 return tcp_v4_conn_request(sk, skb);
1243
1244         if (!ipv6_unicast_destination(skb))
1245                 goto drop; 
1246
1247         /*
1248          *      There are no SYN attacks on IPv6, yet...        
1249          */
1250         if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1251                 if (net_ratelimit())
1252                         printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
1253                 goto drop;              
1254         }
1255
1256         if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1257                 goto drop;
1258
1259         req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1260         if (req == NULL)
1261                 goto drop;
1262
1263 #ifdef CONFIG_TCP_MD5SIG
1264         tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1265 #endif
1266
1267         tcp_clear_options(&tmp_opt);
1268         tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1269         tmp_opt.user_mss = tp->rx_opt.user_mss;
1270
1271         tcp_parse_options(skb, &tmp_opt, 0);
1272
1273         tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1274         tcp_openreq_init(req, &tmp_opt, skb);
1275
1276         treq = inet6_rsk(req);
1277         ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
1278         ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
1279         TCP_ECN_create_request(req, skb->h.th);
1280         treq->pktopts = NULL;
1281         if (ipv6_opt_accepted(sk, skb) ||
1282             np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1283             np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1284                 atomic_inc(&skb->users);
1285                 treq->pktopts = skb;
1286         }
1287         treq->iif = sk->sk_bound_dev_if;
1288
1289         /* So that link locals have meaning */
1290         if (!sk->sk_bound_dev_if &&
1291             ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1292                 treq->iif = inet6_iif(skb);
1293
1294         if (isn == 0) 
1295                 isn = tcp_v6_init_sequence(skb);
1296
1297         tcp_rsk(req)->snt_isn = isn;
1298
1299         security_inet_conn_request(sk, skb, req);
1300
1301         if (tcp_v6_send_synack(sk, req, NULL))
1302                 goto drop;
1303
1304         inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1305         return 0;
1306
1307 drop:
1308         if (req)
1309                 reqsk_free(req);
1310
1311         return 0; /* don't send reset */
1312 }
1313
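/* Create the child socket once the three-way handshake completes.  The
 * v4-mapped case is handed to tcp_v4_syn_recv_sock() and the result is
 * then dressed up as an IPv6 socket.
 */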
1314 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1315                                           struct request_sock *req,
1316                                           struct dst_entry *dst)
1317 {
1318         struct inet6_request_sock *treq = inet6_rsk(req);
1319         struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1320         struct tcp6_sock *newtcp6sk;
1321         struct inet_sock *newinet;
1322         struct tcp_sock *newtp;
1323         struct sock *newsk;
1324         struct ipv6_txoptions *opt;
1325 #ifdef CONFIG_TCP_MD5SIG
1326         struct tcp_md5sig_key *key;
1327 #endif
1328
1329         if (skb->protocol == htons(ETH_P_IP)) {
1330                 /*
1331                  *      v6 mapped
1332                  */
1333
1334                 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1335
1336                 if (newsk == NULL) 
1337                         return NULL;
1338
1339                 newtcp6sk = (struct tcp6_sock *)newsk;
1340                 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1341
1342                 newinet = inet_sk(newsk);
1343                 newnp = inet6_sk(newsk);
1344                 newtp = tcp_sk(newsk);
1345
1346                 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1347
1348                 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1349                               newinet->daddr);
1350
1351                 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1352                               newinet->saddr);
1353
1354                 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1355
1356                 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1357                 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1358 #ifdef CONFIG_TCP_MD5SIG
1359                 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1360 #endif
1361
1362                 newnp->pktoptions  = NULL;
1363                 newnp->opt         = NULL;
1364                 newnp->mcast_oif   = inet6_iif(skb);
1365                 newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;
1366
1367                 /*
1368                  * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1369                  * here, tcp_create_openreq_child now does this for us, see the comment in
1370                  * that function for the gory details. -acme
1371                  */
1372
1373                 /* This is a tricky place. Until this moment IPv4 TCP
1374                    worked with the IPv6 icsk.icsk_af_ops.
1375                    Sync it now.
1376                  */
1377                 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1378
1379                 return newsk;
1380         }
1381
1382         opt = np->opt;
1383
1384         if (sk_acceptq_is_full(sk))
1385                 goto out_overflow;
1386
1387         if (np->rxopt.bits.osrcrt == 2 &&
1388             opt == NULL && treq->pktopts) {
1389                 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
1390                 if (rxopt->srcrt)
1391                         opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
1392         }
1393
1394         if (dst == NULL) {
1395                 struct in6_addr *final_p = NULL, final;
1396                 struct flowi fl;
1397
1398                 memset(&fl, 0, sizeof(fl));
1399                 fl.proto = IPPROTO_TCP;
1400                 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1401                 if (opt && opt->srcrt) {
1402                         struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1403                         ipv6_addr_copy(&final, &fl.fl6_dst);
1404                         ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1405                         final_p = &final;
1406                 }
1407                 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1408                 fl.oif = sk->sk_bound_dev_if;
1409                 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1410                 fl.fl_ip_sport = inet_sk(sk)->sport;
1411                 security_req_classify_flow(req, &fl);
1412
1413                 if (ip6_dst_lookup(sk, &dst, &fl))
1414                         goto out;
1415
1416                 if (final_p)
1417                         ipv6_addr_copy(&fl.fl6_dst, final_p);
1418
1419                 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1420                         goto out;
1421         } 
1422
1423         newsk = tcp_create_openreq_child(sk, req, skb);
1424         if (newsk == NULL)
1425                 goto out;
1426
1427         /*
1428          * No need to charge this sock to the relevant IPv6 refcnt debug socks
1429          * count here, tcp_create_openreq_child now does this for us, see the
1430          * comment in that function for the gory details. -acme
1431          */
1432
1433         newsk->sk_gso_type = SKB_GSO_TCPV6;
1434         __ip6_dst_store(newsk, dst, NULL, NULL);
1435
1436         newtcp6sk = (struct tcp6_sock *)newsk;
1437         inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1438
1439         newtp = tcp_sk(newsk);
1440         newinet = inet_sk(newsk);
1441         newnp = inet6_sk(newsk);
1442
1443         memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1444
1445         ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1446         ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1447         ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1448         newsk->sk_bound_dev_if = treq->iif;
1449
1450         /* Now IPv6 options... 
1451
1452            First: no IPv4 options.
1453          */
1454         newinet->opt = NULL;
1455
1456         /* Clone RX bits */
1457         newnp->rxopt.all = np->rxopt.all;
1458
1459         /* Clone pktoptions received with SYN */
1460         newnp->pktoptions = NULL;
1461         if (treq->pktopts != NULL) {
1462                 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1463                 kfree_skb(treq->pktopts);
1464                 treq->pktopts = NULL;
1465                 if (newnp->pktoptions)
1466                         skb_set_owner_r(newnp->pktoptions, newsk);
1467         }
1468         newnp->opt        = NULL;
1469         newnp->mcast_oif  = inet6_iif(skb);
1470         newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1471
1472         /* Clone native IPv6 options from listening socket (if any)
1473
1474            Yes, keeping a reference count would be much more clever,
1475            but we do one more thing here: reattach optmem
1476            to newsk.
1477          */
1478         if (opt) {
1479                 newnp->opt = ipv6_dup_options(newsk, opt);
1480                 if (opt != np->opt)
1481                         sock_kfree_s(sk, opt, opt->tot_len);
1482         }
1483
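        /* Account for IPv6 extension headers up front so that the MSS
         * calculations below leave room for them in every segment.
         */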
1484         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1485         if (newnp->opt)
1486                 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1487                                                      newnp->opt->opt_flen);
1488
1489         tcp_mtup_init(newsk);
1490         tcp_sync_mss(newsk, dst_mtu(dst));
1491         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1492         tcp_initialize_rcv_mss(newsk);
1493
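        /* This socket speaks IPv6 only, so its IPv4 address fields are
         * unused; stamp them with the LOOPBACK4_IPV6 marker so that stale
         * values never look like a real IPv4 peer.
         */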
1494         newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1495
1496 #ifdef CONFIG_TCP_MD5SIG
1497         /* Copy over the MD5 key from the original socket */
1498         if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1499                 /* We're using one, so create a matching key
1500                  * on the newsk structure. If we fail to get
1501                  * memory, then we end up not copying the key
1502                  * across. Shucks.
1503                  */
1504                 char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
1505                 if (newkey) {
1506                         memcpy(newkey, key->key, key->keylen);
1507                         tcp_v6_md5_do_add(newsk, &newnp->daddr,
1508                                           newkey, key->keylen);
1509                 }
1510         }
1511 #endif
1512
1513         __inet6_hash(&tcp_hashinfo, newsk);
1514         inet_inherit_port(&tcp_hashinfo, sk, newsk);
1515
1516         return newsk;
1517
1518 out_overflow:
1519         NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1520 out:
1521         NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1522         if (opt && opt != np->opt)
1523                 sock_kfree_s(sk, opt, opt->tot_len);
1524         dst_release(dst);
1525         return NULL;
1526 }
1527
1528 static int tcp_v6_checksum_init(struct sk_buff *skb)
1529 {
1530         if (skb->ip_summed == CHECKSUM_COMPLETE) {
1531                 if (!tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
1532                                   &skb->nh.ipv6h->daddr, skb->csum)) {
1533                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1534                         return 0;
1535                 }
1536         }
1537
1538         skb->csum = ~tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
1539                                   &skb->nh.ipv6h->daddr, 0);
1540
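        /* For short segments it is cheap enough to verify the checksum
         * right away; longer packets keep the partial csum and are verified
         * later, typically while being copied to user space.
         */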
1541         if (skb->len <= 76) {
1542                 return __skb_checksum_complete(skb);
1543         }
1544         return 0;
1545 }
1546
1547 /* The socket must have its spinlock held when we get
1548  * here.
1549  *
1550  * We have a potential double-lock case here, so even when
1551  * doing backlog processing we use the BH locking scheme.
1552  * This is because we cannot sleep with the original spinlock
1553  * held.
1554  */
1555 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1556 {
1557         struct ipv6_pinfo *np = inet6_sk(sk);
1558         struct tcp_sock *tp;
1559         struct sk_buff *opt_skb = NULL;
1560
1561         /* Imagine: the socket is IPv6, but an IPv4 packet arrives,
1562            goes to the IPv4 receive handler and is backlogged.
1563            From the backlog it always ends up here. Kerboom...
1564            Fortunately, tcp_rcv_established() and tcp_rcv_state_process()
1565            handle such packets correctly, but that is not the case for
1566            tcp_v6_hnd_req() and tcp_v6_send_reset().   --ANK
1567          */
1568
1569         if (skb->protocol == htons(ETH_P_IP))
1570                 return tcp_v4_do_rcv(sk, skb);
1571
1572 #ifdef CONFIG_TCP_MD5SIG
1573         if (tcp_v6_inbound_md5_hash(sk, skb))
1574                 goto discard;
1575 #endif
1576
1577         if (sk_filter(sk, skb))
1578                 goto discard;
1579
1580         /*
1581          *      socket locking is here for SMP purposes as backlog rcv
1582          *      is currently called with bh processing disabled.
1583          */
1584
1585         /* Handle Stevens' IPV6_PKTOPTIONS.
1586
1587            Yes, this is the only place in our code where we can do this
1588            without affecting IPv4: the rest of the code is protocol
1589            independent, and I do not like the idea of uglifying IPv4
1590            for it.
1591
1592            Actually, the whole idea behind IPV6_PKTOPTIONS does not look
1593            very well thought out. For now we latch the options received
1594            in the last packet that was enqueued by TCP. Feel free to
1595            propose a better solution.
1596                                                --ANK (980728)
1597          */
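        /* For reference, a user would typically request this with something
         * like (illustrative only, not code from this file):
         *
         *      int on = 1;
         *      setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
         *
         * and later read the latched options back with the IPV6_PKTOPTIONS
         * getsockopt.
         */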
1598         if (np->rxopt.all)
1599                 opt_skb = skb_clone(skb, GFP_ATOMIC);
1600
1601         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1602                 TCP_CHECK_TIMER(sk);
1603                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1604                         goto reset;
1605                 TCP_CHECK_TIMER(sk);
1606                 if (opt_skb)
1607                         goto ipv6_pktoptions;
1608                 return 0;
1609         }
1610
1611         if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1612                 goto csum_err;
1613
1614         if (sk->sk_state == TCP_LISTEN) { 
1615                 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1616                 if (!nsk)
1617                         goto discard;
1618
1619                 /*
1620                  * Queue it on the new socket if the new socket is active,
1621                  * otherwise we just short-circuit this and continue with
1622                  * the new socket.
1623                  */
1624                 if (nsk != sk) {
1625                         if (tcp_child_process(sk, nsk, skb))
1626                                 goto reset;
1627                         if (opt_skb)
1628                                 __kfree_skb(opt_skb);
1629                         return 0;
1630                 }
1631         }
1632
1633         TCP_CHECK_TIMER(sk);
1634         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1635                 goto reset;
1636         TCP_CHECK_TIMER(sk);
1637         if (opt_skb)
1638                 goto ipv6_pktoptions;
1639         return 0;
1640
1641 reset:
1642         tcp_v6_send_reset(sk, skb);
1643 discard:
1644         if (opt_skb)
1645                 __kfree_skb(opt_skb);
1646         kfree_skb(skb);
1647         return 0;
1648 csum_err:
1649         TCP_INC_STATS_BH(TCP_MIB_INERRS);
1650         goto discard;
1651
1652
1653 ipv6_pktoptions:
1654         /* What are we checking for here?
1655
1656            1. The skb was enqueued by TCP.
1657            2. The skb was added to the tail of the read queue, not out of order.
1658            3. The socket is not in a passive state (neither LISTEN nor CLOSE).
1659            4. Finally, it really carries options that the user wants to receive.
1660          */
1661         tp = tcp_sk(sk);
1662         if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1663             !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1664                 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1665                         np->mcast_oif = inet6_iif(opt_skb);
1666                 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1667                         np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
1668                 if (ipv6_opt_accepted(sk, opt_skb)) {
1669                         skb_set_owner_r(opt_skb, sk);
1670                         opt_skb = xchg(&np->pktoptions, opt_skb);
1671                 } else {
1672                         __kfree_skb(opt_skb);
1673                         opt_skb = xchg(&np->pktoptions, NULL);
1674                 }
1675         }
1676
1677         if (opt_skb)
1678                 kfree_skb(opt_skb);
1679         return 0;
1680 }
1681
1682 static int tcp_v6_rcv(struct sk_buff **pskb)
1683 {
1684         struct sk_buff *skb = *pskb;
1685         struct tcphdr *th;      
1686         struct sock *sk;
1687         int ret;
1688
1689         if (skb->pkt_type != PACKET_HOST)
1690                 goto discard_it;
1691
1692         /*
1693          *      Count it even if it's bad.
1694          */
1695         TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1696
1697         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1698                 goto discard_it;
1699
1700         th = skb->h.th;
1701
1702         if (th->doff < sizeof(struct tcphdr)/4)
1703                 goto bad_packet;
1704         if (!pskb_may_pull(skb, th->doff*4))
1705                 goto discard_it;
1706
1707         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1708              tcp_v6_checksum_init(skb)))
1709                 goto bad_packet;
1710
1711         th = skb->h.th;
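        /* Fill in the TCP control block: end_seq covers the payload plus
         * one sequence number each for SYN and FIN, since those flags
         * consume sequence space of their own.
         */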
1712         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1713         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1714                                     skb->len - th->doff*4);
1715         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1716         TCP_SKB_CB(skb)->when = 0;
1717         TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
1718         TCP_SKB_CB(skb)->sacked = 0;
1719
1720         sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
1721                             &skb->nh.ipv6h->daddr, ntohs(th->dest),
1722                             inet6_iif(skb));
1723
1724         if (!sk)
1725                 goto no_tcp_socket;
1726
1727 process:
1728         if (sk->sk_state == TCP_TIME_WAIT)
1729                 goto do_time_wait;
1730
1731         if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1732                 goto discard_and_relse;
1733
1734         if (sk_filter(sk, skb))
1735                 goto discard_and_relse;
1736
1737         skb->dev = NULL;
1738
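        /* If no user context currently owns the socket, process the segment
         * now (possibly via the prequeue fast path); otherwise park it on
         * the backlog to be handled when the lock owner releases the socket.
         */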
1739         bh_lock_sock_nested(sk);
1740         ret = 0;
1741         if (!sock_owned_by_user(sk)) {
1742 #ifdef CONFIG_NET_DMA
1743                 struct tcp_sock *tp = tcp_sk(sk);
1744                 if (tp->ucopy.dma_chan)
1745                         ret = tcp_v6_do_rcv(sk, skb);
1746                 else
1747 #endif
1748                 {
1749                         if (!tcp_prequeue(sk, skb))
1750                                 ret = tcp_v6_do_rcv(sk, skb);
1751                 }
1752         } else
1753                 sk_add_backlog(sk, skb);
1754         bh_unlock_sock(sk);
1755
1756         sock_put(sk);
1757         return ret ? -1 : 0;
1758
1759 no_tcp_socket:
1760         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1761                 goto discard_it;
1762
1763         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1764 bad_packet:
1765                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1766         } else {
1767                 tcp_v6_send_reset(NULL, skb);
1768         }
1769
1770 discard_it:
1771
1772         /*
1773          *      Discard frame
1774          */
1775
1776         kfree_skb(skb);
1777         return 0;
1778
1779 discard_and_relse:
1780         sock_put(sk);
1781         goto discard_it;
1782
1783 do_time_wait:
1784         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1785                 inet_twsk_put(inet_twsk(sk));
1786                 goto discard_it;
1787         }
1788
1789         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1790                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1791                 inet_twsk_put(inet_twsk(sk));
1792                 goto discard_it;
1793         }
1794
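        /* A valid SYN may legitimately re-open a connection that is in
         * TIME-WAIT: if a matching listener exists, the timewait entry is
         * removed and processing restarts on that listener.
         */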
1795         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1796         case TCP_TW_SYN:
1797         {
1798                 struct sock *sk2;
1799
1800                 sk2 = inet6_lookup_listener(&tcp_hashinfo,
1801                                             &skb->nh.ipv6h->daddr,
1802                                             ntohs(th->dest), inet6_iif(skb));
1803                 if (sk2 != NULL) {
1804                         struct inet_timewait_sock *tw = inet_twsk(sk);
1805                         inet_twsk_deschedule(tw, &tcp_death_row);
1806                         inet_twsk_put(tw);
1807                         sk = sk2;
1808                         goto process;
1809                 }
1810                 /* Fall through to ACK */
1811         }
1812         case TCP_TW_ACK:
1813                 tcp_v6_timewait_ack(sk, skb);
1814                 break;
1815         case TCP_TW_RST:
1816                 goto no_tcp_socket;
1817         case TCP_TW_SUCCESS:;
1818         }
1819         goto discard_it;
1820 }
1821
1822 static int tcp_v6_remember_stamp(struct sock *sk)
1823 {
1824         /* Alas, not yet... */
1825         return 0;
1826 }
1827
1828 static struct inet_connection_sock_af_ops ipv6_specific = {
1829         .queue_xmit        = inet6_csk_xmit,
1830         .send_check        = tcp_v6_send_check,
1831         .rebuild_header    = inet6_sk_rebuild_header,
1832         .conn_request      = tcp_v6_conn_request,
1833         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1834         .remember_stamp    = tcp_v6_remember_stamp,
1835         .net_header_len    = sizeof(struct ipv6hdr),
1836         .setsockopt        = ipv6_setsockopt,
1837         .getsockopt        = ipv6_getsockopt,
1838         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1839         .sockaddr_len      = sizeof(struct sockaddr_in6),
1840 #ifdef CONFIG_COMPAT
1841         .compat_setsockopt = compat_ipv6_setsockopt,
1842         .compat_getsockopt = compat_ipv6_getsockopt,
1843 #endif
1844 };
1845
1846 static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1847 #ifdef CONFIG_TCP_MD5SIG
1848         .md5_lookup     =       tcp_v6_md5_lookup,
1849         .calc_md5_hash  =       tcp_v6_calc_md5_hash,
1850         .md5_add        =       tcp_v6_md5_add_func,
1851         .md5_parse      =       tcp_v6_parse_md5_keys,
1852 #endif
1853 };
1854
1855 /*
1856  *      TCP over IPv4 via INET6 API
1857  */
1858
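/* These ops are switched in when an AF_INET6 socket ends up talking to an
 * IPv4-mapped address: the traffic on the wire follows the IPv4 paths while
 * the application still sees an IPv6 socket.
 */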
1859 static struct inet_connection_sock_af_ops ipv6_mapped = {
1860         .queue_xmit        = ip_queue_xmit,
1861         .send_check        = tcp_v4_send_check,
1862         .rebuild_header    = inet_sk_rebuild_header,
1863         .conn_request      = tcp_v6_conn_request,
1864         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1865         .remember_stamp    = tcp_v4_remember_stamp,
1866         .net_header_len    = sizeof(struct iphdr),
1867         .setsockopt        = ipv6_setsockopt,
1868         .getsockopt        = ipv6_getsockopt,
1869         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1870         .sockaddr_len      = sizeof(struct sockaddr_in6),
1871 #ifdef CONFIG_COMPAT
1872         .compat_setsockopt = compat_ipv6_setsockopt,
1873         .compat_getsockopt = compat_ipv6_getsockopt,
1874 #endif
1875 };
1876
1877 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1878 #ifdef CONFIG_TCP_MD5SIG
1879         .md5_lookup     =       tcp_v4_md5_lookup,
1880         .calc_md5_hash  =       tcp_v4_calc_md5_hash,
1881         .md5_add        =       tcp_v6_md5_add_func,
1882         .md5_parse      =       tcp_v6_parse_md5_keys,
1883 #endif
1884 };
1885
1886 /* NOTE: Many fields are already zeroed explicitly by sk_alloc(),
1887  *       so they need not be initialized here.
1888  */
1889 static int tcp_v6_init_sock(struct sock *sk)
1890 {
1891         struct inet_connection_sock *icsk = inet_csk(sk);
1892         struct tcp_sock *tp = tcp_sk(sk);
1893
1894         skb_queue_head_init(&tp->out_of_order_queue);
1895         tcp_init_xmit_timers(sk);
1896         tcp_prequeue_init(tp);
1897
1898         icsk->icsk_rto = TCP_TIMEOUT_INIT;
1899         tp->mdev = TCP_TIMEOUT_INIT;
1900
1901         /* So many TCP implementations out there (incorrectly) count the
1902          * initial SYN frame in their delayed-ACK and congestion control
1903          * algorithms that we must have the following bandaid to talk
1904          * efficiently to them.  -DaveM
1905          */
1906         tp->snd_cwnd = 2;
1907
1908         /* See draft-stevens-tcpca-spec-01 for discussion of the
1909          * initialization of these values.
1910          */
1911         tp->snd_ssthresh = 0x7fffffff;
1912         tp->snd_cwnd_clamp = ~0;
1913         tp->mss_cache = 536;
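        /* 536 bytes (576 minimum datagram minus 40 bytes of headers) is the
         * conservative historic default MSS; it is replaced once the real
         * path MTU is known via tcp_sync_mss().
         */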
1914
1915         tp->reordering = sysctl_tcp_reordering;
1916
1917         sk->sk_state = TCP_CLOSE;
1918
1919         icsk->icsk_af_ops = &ipv6_specific;
1920         icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1921         icsk->icsk_sync_mss = tcp_sync_mss;
1922         sk->sk_write_space = sk_stream_write_space;
1923         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1924
1925 #ifdef CONFIG_TCP_MD5SIG
1926         tp->af_specific = &tcp_sock_ipv6_specific;
1927 #endif
1928
1929         sk->sk_sndbuf = sysctl_tcp_wmem[1];
1930         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1931
1932         atomic_inc(&tcp_sockets_allocated);
1933
1934         return 0;
1935 }
1936
1937 static int tcp_v6_destroy_sock(struct sock *sk)
1938 {
1939 #ifdef CONFIG_TCP_MD5SIG
1940         /* Clean up the MD5 key list */
1941         if (tcp_sk(sk)->md5sig_info)
1942                 tcp_v6_clear_md5_list(sk);
1943 #endif
1944         tcp_v4_destroy_sock(sk);
1945         return inet6_destroy_sock(sk);
1946 }
1947
1948 /* Proc filesystem TCPv6 sock list dumping. */
1949 static void get_openreq6(struct seq_file *seq, 
1950                          struct sock *sk, struct request_sock *req, int i, int uid)
1951 {
1952         int ttd = req->expires - jiffies;
1953         struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1954         struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1955
1956         if (ttd < 0)
1957                 ttd = 0;
1958
1959         seq_printf(seq,
1960                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1961                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1962                    i,
1963                    src->s6_addr32[0], src->s6_addr32[1],
1964                    src->s6_addr32[2], src->s6_addr32[3],
1965                    ntohs(inet_sk(sk)->sport),
1966                    dest->s6_addr32[0], dest->s6_addr32[1],
1967                    dest->s6_addr32[2], dest->s6_addr32[3],
1968                    ntohs(inet_rsk(req)->rmt_port),
1969                    TCP_SYN_RECV,
1970                    0, 0, /* could print option size, but that is af dependent. */
1971                    1,   /* timers active (only the expire timer) */  
1972                    jiffies_to_clock_t(ttd), 
1973                    req->retrans,
1974                    uid,
1975                    0,  /* non standard timer */  
1976                    0, /* open_requests have no inode */
1977                    0, req);
1978 }
1979
1980 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1981 {
1982         struct in6_addr *dest, *src;
1983         __u16 destp, srcp;
1984         int timer_active;
1985         unsigned long timer_expires;
1986         struct inet_sock *inet = inet_sk(sp);
1987         struct tcp_sock *tp = tcp_sk(sp);
1988         const struct inet_connection_sock *icsk = inet_csk(sp);
1989         struct ipv6_pinfo *np = inet6_sk(sp);
1990
1991         dest  = &np->daddr;
1992         src   = &np->rcv_saddr;
1993         destp = ntohs(inet->dport);
1994         srcp  = ntohs(inet->sport);
1995
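        /* Encode the pending timer using the /proc/net/tcp convention:
         * 1 retransmit, 2 sk_timer (keepalive), 3 TIME-WAIT, 4 zero-window
         * probe, 0 none.
         */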
1996         if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1997                 timer_active    = 1;
1998                 timer_expires   = icsk->icsk_timeout;
1999         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2000                 timer_active    = 4;
2001                 timer_expires   = icsk->icsk_timeout;
2002         } else if (timer_pending(&sp->sk_timer)) {
2003                 timer_active    = 2;
2004                 timer_expires   = sp->sk_timer.expires;
2005         } else {
2006                 timer_active    = 0;
2007                 timer_expires = jiffies;
2008         }
2009
2010         seq_printf(seq,
2011                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2012                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2013                    i,
2014                    src->s6_addr32[0], src->s6_addr32[1],
2015                    src->s6_addr32[2], src->s6_addr32[3], srcp,
2016                    dest->s6_addr32[0], dest->s6_addr32[1],
2017                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
2018                    sp->sk_state, 
2019                    tp->write_seq - tp->snd_una,
2020                    (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2021                    timer_active,
2022                    jiffies_to_clock_t(timer_expires - jiffies),
2023                    icsk->icsk_retransmits,
2024                    sock_i_uid(sp),
2025                    icsk->icsk_probes_out,
2026                    sock_i_ino(sp),
2027                    atomic_read(&sp->sk_refcnt), sp,
2028                    icsk->icsk_rto,
2029                    icsk->icsk_ack.ato,
2030                    (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2031                    tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
2032                    );
2033 }
2034
2035 static void get_timewait6_sock(struct seq_file *seq, 
2036                                struct inet_timewait_sock *tw, int i)
2037 {
2038         struct in6_addr *dest, *src;
2039         __u16 destp, srcp;
2040         struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2041         int ttd = tw->tw_ttd - jiffies;
2042
2043         if (ttd < 0)
2044                 ttd = 0;
2045
2046         dest = &tw6->tw_v6_daddr;
2047         src  = &tw6->tw_v6_rcv_saddr;
2048         destp = ntohs(tw->tw_dport);
2049         srcp  = ntohs(tw->tw_sport);
2050
2051         seq_printf(seq,
2052                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2053                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2054                    i,
2055                    src->s6_addr32[0], src->s6_addr32[1],
2056                    src->s6_addr32[2], src->s6_addr32[3], srcp,
2057                    dest->s6_addr32[0], dest->s6_addr32[1],
2058                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
2059                    tw->tw_substate, 0, 0,
2060                    3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2061                    atomic_read(&tw->tw_refcnt), tw);
2062 }
2063
2064 #ifdef CONFIG_PROC_FS
2065 static int tcp6_seq_show(struct seq_file *seq, void *v)
2066 {
2067         struct tcp_iter_state *st;
2068
2069         if (v == SEQ_START_TOKEN) {
2070                 seq_puts(seq,
2071                          "  sl  "
2072                          "local_address                         "
2073                          "remote_address                        "
2074                          "st tx_queue rx_queue tr tm->when retrnsmt"
2075                          "   uid  timeout inode\n");
2076                 goto out;
2077         }
2078         st = seq->private;
2079
2080         switch (st->state) {
2081         case TCP_SEQ_STATE_LISTENING:
2082         case TCP_SEQ_STATE_ESTABLISHED:
2083                 get_tcp6_sock(seq, v, st->num);
2084                 break;
2085         case TCP_SEQ_STATE_OPENREQ:
2086                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2087                 break;
2088         case TCP_SEQ_STATE_TIME_WAIT:
2089                 get_timewait6_sock(seq, v, st->num);
2090                 break;
2091         }
2092 out:
2093         return 0;
2094 }
2095
2096 static struct file_operations tcp6_seq_fops;
2097 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2098         .owner          = THIS_MODULE,
2099         .name           = "tcp6",
2100         .family         = AF_INET6,
2101         .seq_show       = tcp6_seq_show,
2102         .seq_fops       = &tcp6_seq_fops,
2103 };
2104
2105 int __init tcp6_proc_init(void)
2106 {
2107         return tcp_proc_register(&tcp6_seq_afinfo);
2108 }
2109
2110 void tcp6_proc_exit(void)
2111 {
2112         tcp_proc_unregister(&tcp6_seq_afinfo);
2113 }
2114 #endif
2115
2116 struct proto tcpv6_prot = {
2117         .name                   = "TCPv6",
2118         .owner                  = THIS_MODULE,
2119         .close                  = tcp_close,
2120         .connect                = tcp_v6_connect,
2121         .disconnect             = tcp_disconnect,
2122         .accept                 = inet_csk_accept,
2123         .ioctl                  = tcp_ioctl,
2124         .init                   = tcp_v6_init_sock,
2125         .destroy                = tcp_v6_destroy_sock,
2126         .shutdown               = tcp_shutdown,
2127         .setsockopt             = tcp_setsockopt,
2128         .getsockopt             = tcp_getsockopt,
2129         .sendmsg                = tcp_sendmsg,
2130         .recvmsg                = tcp_recvmsg,
2131         .backlog_rcv            = tcp_v6_do_rcv,
2132         .hash                   = tcp_v6_hash,
2133         .unhash                 = tcp_unhash,
2134         .get_port               = tcp_v6_get_port,
2135         .enter_memory_pressure  = tcp_enter_memory_pressure,
2136         .sockets_allocated      = &tcp_sockets_allocated,
2137         .memory_allocated       = &tcp_memory_allocated,
2138         .memory_pressure        = &tcp_memory_pressure,
2139         .orphan_count           = &tcp_orphan_count,
2140         .sysctl_mem             = sysctl_tcp_mem,
2141         .sysctl_wmem            = sysctl_tcp_wmem,
2142         .sysctl_rmem            = sysctl_tcp_rmem,
2143         .max_header             = MAX_TCP_HEADER,
2144         .obj_size               = sizeof(struct tcp6_sock),
2145         .twsk_prot              = &tcp6_timewait_sock_ops,
2146         .rsk_prot               = &tcp6_request_sock_ops,
2147 #ifdef CONFIG_COMPAT
2148         .compat_setsockopt      = compat_tcp_setsockopt,
2149         .compat_getsockopt      = compat_tcp_getsockopt,
2150 #endif
2151 };
2152
2153 static struct inet6_protocol tcpv6_protocol = {
2154         .handler        =       tcp_v6_rcv,
2155         .err_handler    =       tcp_v6_err,
2156         .gso_send_check =       tcp_v6_gso_send_check,
2157         .gso_segment    =       tcp_tso_segment,
2158         .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2159 };
2160
2161 static struct inet_protosw tcpv6_protosw = {
2162         .type           =       SOCK_STREAM,
2163         .protocol       =       IPPROTO_TCP,
2164         .prot           =       &tcpv6_prot,
2165         .ops            =       &inet6_stream_ops,
2166         .capability     =       -1,
2167         .no_check       =       0,
2168         .flags          =       INET_PROTOSW_PERMANENT |
2169                                 INET_PROTOSW_ICSK,
2170 };
2171
2172 void __init tcpv6_init(void)
2173 {
2174         /* register inet6 protocol */
2175         if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
2176                 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
2177         inet6_register_protosw(&tcpv6_protosw);
2178
2179         if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
2180                                      IPPROTO_TCP) < 0)
2181                 panic("Failed to create the TCPv6 control socket.\n");
2182 }