ALSA: opl4 - Fix a wrong argument in proc write callback
[safe/jmp/linux-2.6] / net / ipv6 / tcp_ipv6.c
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      Based on:
9  *      linux/net/ipv4/tcp.c
10  *      linux/net/ipv4/tcp_input.c
11  *      linux/net/ipv4/tcp_output.c
12  *
13  *      Fixes:
14  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
15  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
16  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
17  *                                      a single port at the same time.
18  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64
65 #include <asm/uaccess.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72
/* Forward declarations for handlers defined later in this file. */
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

/* af_ops tables: ipv6_specific for native v6 sockets, ipv6_mapped for
 * v6 sockets that connected to a v4-mapped address. */
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
/* Stub so callers need no #ifdefs when TCP-MD5 is compiled out:
 * no key is ever found. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif
91
92 static void tcp_v6_hash(struct sock *sk)
93 {
94         if (sk->sk_state != TCP_CLOSE) {
95                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
96                         tcp_prot.hash(sk);
97                         return;
98                 }
99                 local_bh_disable();
100                 __inet6_hash(sk, NULL);
101                 local_bh_enable();
102         }
103 }
104
105 static __inline__ __sum16 tcp_v6_check(int len,
106                                    struct in6_addr *saddr,
107                                    struct in6_addr *daddr,
108                                    __wsum base)
109 {
110         return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
111 }
112
113 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
114 {
115         return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
116                                             ipv6_hdr(skb)->saddr.s6_addr32,
117                                             tcp_hdr(skb)->dest,
118                                             tcp_hdr(skb)->source);
119 }
120
/*
 * Active open (connect()) for an AF_INET6 TCP socket.
 *
 * Validates the destination, resolves flow labels and scope ids,
 * routes the flow, picks source address and initial sequence number,
 * and kicks off the SYN via tcp_connect().  A v4-mapped destination is
 * handed to tcp_v4_connect() after switching the socket to the
 * IPv4-mapped af_ops.
 *
 * Returns 0 on success or a negative errno.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		/* Caller supplied flow info: a non-zero flow label must
		 * match a label set up earlier via IPV6_FLOWLABEL_MGR,
		 * which also pins the destination address. */
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		/* Destination changed since the last connect: drop the
		 * cached peer timestamps and restart the sequence space. */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket to the IPv4-mapped operation tables
		 * and delegate to the IPv4 connect path. */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* Roll the af_ops switch back so the socket is
			 * usable as a native IPv6 socket again. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->inet_sport;

	if (np->opt && np->opt->srcrt) {
		/* Source routing: route towards the first hop of the
		 * routing header; 'final' remembers the real destination
		 * which is restored after the route lookup. */
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		/* Larval IPsec state: wait via a blackhole route so the
		 * first packets are dropped rather than sent in clear. */
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		/* No bound source address: adopt the one chosen by the
		 * route lookup. */
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
319
/*
 * ICMPv6 error handler for TCP.
 *
 * 'skb' carries the ICMPv6 message; the offending IPv6 header starts at
 * skb->data and the embedded TCP header at skb->data + offset.  Looks up
 * the socket the error refers to, handles PMTU discovery for
 * ICMPV6_PKT_TOOBIG, drops matching request socks on hard errors, and
 * otherwise reports the converted errno to the socket.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	/* TIME_WAIT sockets only need their reference dropped. */
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* Ignore errors whose echoed sequence number lies outside the
	 * current send window - likely stale or forged. */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		/* Path MTU discovery: shrink the MSS and retransmit if
		 * our cached path MTU is now too large. */
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.mark = sk->sk_mark;
			fl.fl_ip_dport = inet->inet_dport;
			fl.fl_ip_sport = inet->inet_sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* Established socket: hard error only if the application asked
	 * for error reporting and we hold the lock; soft otherwise. */
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
463
464
/*
 * Build and transmit a SYN-ACK for a pending connection request.
 *
 * Routes the reply flow from the request's addresses (honouring any
 * source-routing header in the listener's options), builds the SYN-ACK
 * skb, checksums it and sends it with ip6_xmit().
 *
 * Returns 0 on success or a negative errno.
 */
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		/* Route via the routing header's first hop; restore the
		 * real destination after the lookup. */
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial(th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	/* Only free 'opt' if it was duplicated (never true here yet;
	 * kept for symmetry with callers that may pass a copy). */
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
523
524 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
525                              struct request_values *rvp)
526 {
527         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
528         return tcp_v6_send_synack(sk, req, rvp);
529 }
530
531 static inline void syn_flood_warning(struct sk_buff *skb)
532 {
533 #ifdef CONFIG_SYN_COOKIES
534         if (sysctl_tcp_syncookies)
535                 printk(KERN_INFO
536                        "TCPv6: Possible SYN flooding on port %d. "
537                        "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
538         else
539 #endif
540                 printk(KERN_INFO
541                        "TCPv6: Possible SYN flooding on port %d. "
542                        "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
543 }
544
545 static void tcp_v6_reqsk_destructor(struct request_sock *req)
546 {
547         kfree_skb(inet6_rsk(req)->pktopts);
548 }
549
550 #ifdef CONFIG_TCP_MD5SIG
551 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
552                                                    struct in6_addr *addr)
553 {
554         struct tcp_sock *tp = tcp_sk(sk);
555         int i;
556
557         BUG_ON(tp == NULL);
558
559         if (!tp->md5sig_info || !tp->md5sig_info->entries6)
560                 return NULL;
561
562         for (i = 0; i < tp->md5sig_info->entries6; i++) {
563                 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
564                         return &tp->md5sig_info->keys6[i].base;
565         }
566         return NULL;
567 }
568
569 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
570                                                 struct sock *addr_sk)
571 {
572         return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
573 }
574
575 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
576                                                       struct request_sock *req)
577 {
578         return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
579 }
580
/*
 * Add (or replace) the MD5 signature key for IPv6 peer 'peer'.
 *
 * Takes ownership of 'newkey' (a kmalloc'd buffer): on every failure
 * path it is freed here; on success it is stored in the key table.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			/* MD5 segments cannot be offloaded/GSO'd. */
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			/* Table full: grow it by one slot, copying the
			 * existing entries over. */
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
638
639 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
640                                u8 *newkey, __u8 newkeylen)
641 {
642         return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
643                                  newkey, newkeylen);
644 }
645
/*
 * Remove the MD5 key configured for IPv6 peer 'peer'.
 *
 * Frees the key material, compacts the table (or frees it entirely when
 * it becomes empty) and drops one reference on the MD5 pool.
 * Returns 0 on success, -ENOENT if no key exists for 'peer'.
 */
static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				/* entries6 was already decremented, so the
				 * count below covers the tail after slot i. */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
675
/*
 * Free every MD5 key on the socket - both the IPv6 and the IPv4 key
 * tables - releasing one MD5-pool reference per non-empty table.
 * Caller must guarantee tp->md5sig_info is non-NULL.
 */
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
703
/*
 * setsockopt(TCP_MD5SIG) handler: copy a struct tcp_md5sig from user
 * space and add or delete the key for the given peer address.
 * A zero key length means delete.  V4-mapped addresses are routed to
 * the IPv4 key table.  Returns 0 or a negative errno.
 */
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* Zero key length requests deletion of an existing key. */
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		/* MD5 segments cannot be offloaded/GSO'd. */
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	/* Duplicate the key; the *_do_add helpers take ownership and
	 * free it on any failure. */
	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
752
753 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
754                                         struct in6_addr *daddr,
755                                         struct in6_addr *saddr, int nbytes)
756 {
757         struct tcp6_pseudohdr *bp;
758         struct scatterlist sg;
759
760         bp = &hp->md5_blk.ip6;
761         /* 1. TCP pseudo-header (RFC2460) */
762         ipv6_addr_copy(&bp->saddr, saddr);
763         ipv6_addr_copy(&bp->daddr, daddr);
764         bp->protocol = cpu_to_be32(IPPROTO_TCP);
765         bp->len = cpu_to_be32(nbytes);
766
767         sg_init_one(&sg, bp, sizeof(*bp));
768         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
769 }
770
/*
 * Compute the TCP-MD5 digest over pseudo-header + TCP header only
 * (no payload), writing 16 bytes into 'md5_hash'.
 * Returns 0 on success; on any failure the digest is zeroed and 1 is
 * returned.  The hash-update order is mandated by RFC 2385.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
803
/*
 * Compute the TCP-MD5 digest over a full segment (pseudo-header, TCP
 * header and payload of 'skb'), writing 16 bytes into 'md5_hash'.
 * Addresses come from the socket, the request sock, or the skb's IPv6
 * header - whichever is available, in that priority order.
 * Returns 0 on success; on failure the digest is zeroed and 1 returned.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		/* No socket context (e.g. inbound check): use the
		 * addresses carried by the packet itself. */
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
853
/*
 * Verify the TCP-MD5 option (RFC 2385) of an inbound segment against
 * the key configured for the peer's source address, if any.
 *
 * Returns 0 if the segment may be processed (no key configured and no
 * option present, or the signature verified) and 1 if it must be
 * dropped (missing, unexpected or bad signature).
 */
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	/* Key configured but peer sent no signature: drop and count. */
	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	/* Signature present but no key configured: drop and count. */
	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	/* genhash != 0 means we failed to compute our own digest at all. */
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
896 #endif
897
/*
 * Operations applied to an IPv6 TCP connection request while it sits
 * in a listener's queue: how SYN-ACKs are retransmitted, how bare
 * ACKs/RSTs are generated, and how per-request state is torn down.
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
907
#ifdef CONFIG_TCP_MD5SIG
/* AF-specific MD5 hooks used while the connection is still a request sock. */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
914
/* TIME_WAIT socket operations: object size, uniqueness check, destructor. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
920
921 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
922 {
923         struct ipv6_pinfo *np = inet6_sk(sk);
924         struct tcphdr *th = tcp_hdr(skb);
925
926         if (skb->ip_summed == CHECKSUM_PARTIAL) {
927                 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
928                 skb->csum_start = skb_transport_header(skb) - skb->head;
929                 skb->csum_offset = offsetof(struct tcphdr, check);
930         } else {
931                 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
932                                             csum_partial(th, th->doff<<2,
933                                                          skb->csum));
934         }
935 }
936
937 static int tcp_v6_gso_send_check(struct sk_buff *skb)
938 {
939         struct ipv6hdr *ipv6h;
940         struct tcphdr *th;
941
942         if (!pskb_may_pull(skb, sizeof(*th)))
943                 return -EINVAL;
944
945         ipv6h = ipv6_hdr(skb);
946         th = tcp_hdr(skb);
947
948         th->check = 0;
949         th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
950                                      IPPROTO_TCP, 0);
951         skb->csum_start = skb_transport_header(skb) - skb->head;
952         skb->csum_offset = offsetof(struct tcphdr, check);
953         skb->ip_summed = CHECKSUM_PARTIAL;
954         return 0;
955 }
956
/*
 * GRO receive hook for TCP over IPv6: validate (or give up on) the
 * checksum, then hand the segment to the generic TCP GRO engine.
 * Returning NULL with ->flush set stops aggregation for this packet.
 */
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* Hardware provided the full sum: verify it right here. */
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		/* No trustworthy checksum: don't aggregate this packet. */
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
978
979 static int tcp6_gro_complete(struct sk_buff *skb)
980 {
981         struct ipv6hdr *iph = ipv6_hdr(skb);
982         struct tcphdr *th = tcp_hdr(skb);
983
984         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
985                                   &iph->saddr, &iph->daddr, 0);
986         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
987
988         return tcp_gro_complete(skb);
989 }
990
991 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
992                                  u32 ts, struct tcp_md5sig_key *key, int rst)
993 {
994         struct tcphdr *th = tcp_hdr(skb), *t1;
995         struct sk_buff *buff;
996         struct flowi fl;
997         struct net *net = dev_net(skb_dst(skb)->dev);
998         struct sock *ctl_sk = net->ipv6.tcp_sk;
999         unsigned int tot_len = sizeof(struct tcphdr);
1000         struct dst_entry *dst;
1001         __be32 *topt;
1002
1003         if (ts)
1004                 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1005 #ifdef CONFIG_TCP_MD5SIG
1006         if (key)
1007                 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1008 #endif
1009
1010         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1011                          GFP_ATOMIC);
1012         if (buff == NULL)
1013                 return;
1014
1015         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1016
1017         t1 = (struct tcphdr *) skb_push(buff, tot_len);
1018         skb_reset_transport_header(skb);
1019
1020         /* Swap the send and the receive. */
1021         memset(t1, 0, sizeof(*t1));
1022         t1->dest = th->source;
1023         t1->source = th->dest;
1024         t1->doff = tot_len / 4;
1025         t1->seq = htonl(seq);
1026         t1->ack_seq = htonl(ack);
1027         t1->ack = !rst || !th->ack;
1028         t1->rst = rst;
1029         t1->window = htons(win);
1030
1031         topt = (__be32 *)(t1 + 1);
1032
1033         if (ts) {
1034                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1035                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1036                 *topt++ = htonl(tcp_time_stamp);
1037                 *topt++ = htonl(ts);
1038         }
1039
1040 #ifdef CONFIG_TCP_MD5SIG
1041         if (key) {
1042                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1043                                 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1044                 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1045                                     &ipv6_hdr(skb)->saddr,
1046                                     &ipv6_hdr(skb)->daddr, t1);
1047         }
1048 #endif
1049
1050         buff->csum = csum_partial(t1, tot_len, 0);
1051
1052         memset(&fl, 0, sizeof(fl));
1053         ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1054         ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1055
1056         t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1057                                     tot_len, IPPROTO_TCP,
1058                                     buff->csum);
1059
1060         fl.proto = IPPROTO_TCP;
1061         fl.oif = inet6_iif(skb);
1062         fl.fl_ip_dport = t1->dest;
1063         fl.fl_ip_sport = t1->source;
1064         security_skb_classify_flow(skb, &fl);
1065
1066         /* Pass a socket to ip6_dst_lookup either it is for RST
1067          * Underlying function will use this to retrieve the network
1068          * namespace
1069          */
1070         if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
1071                 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
1072                         skb_dst_set(buff, dst);
1073                         ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1074                         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1075                         if (rst)
1076                                 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1077                         return;
1078                 }
1079         }
1080
1081         kfree_skb(buff);
1082 }
1083
/*
 * Send a RST in answer to @skb, following RFC 793: never reset a
 * reset, only reply to unicast destinations, and derive seq/ack from
 * the offending segment depending on whether it carried an ACK.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	/* Sign the RST if a key is configured for this peer. */
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		/* Acknowledge everything the sender used up (SYN/FIN count). */
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}
1109
/* Thin wrapper: emit a stateless pure ACK (rst == 0). */
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}
1115
/*
 * ACK on behalf of a TIME_WAIT socket, using the state snapshot kept
 * in the timewait sock; drops the timewait reference when done.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}
1127
/*
 * ACK on behalf of a connection still in the request (SYN_RECV) queue:
 * ISNs + 1 acknowledge the SYN exchange itself.
 */
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
1134
1135
/*
 * Demultiplex a segment that arrived on a listening socket: first try
 * a pending connection request, then an already-established socket
 * created in the meantime; finally give SYN cookies a chance on a
 * bare ACK.  Returns the socket to continue processing on (bh-locked
 * when it is a freshly found established socket), or NULL to discard.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			/* Caller inherits the bh lock on the child. */
			bh_lock_sock(nsk);
			return nsk;
		}
		/* TIME_WAIT match: drop our reference and the segment. */
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1168
1169 /* FIXME: this is substantially similar to the ipv4 code.
1170  * Can some kind of merge be done? -- erics
1171  */
/*
 * Handle an incoming SYN on a listening socket: allocate and queue a
 * request sock, falling back to SYN cookies under flood (if enabled),
 * and send the SYN-ACK.  v4-mapped traffic is diverted to the IPv4
 * handler.  Always returns 0 — a listener never answers with a reset
 * from here.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/* Request queue full: switch to SYN cookies if allowed, else drop. */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	/* Clamp MSS so a full segment always fits in the IPv6 minimum MTU. */
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	/* Experimental TCP cookie extension: mix both addresses and the
	 * initiator's variable-length cookie into the secret "bakery".
	 */
	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	/* SYN cookies only encode state when timestamps are available. */
	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* Keep the SYN skb alive if the user asked for any of the
		 * per-packet IPV6_PKTOPTIONS data.
		 */
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	/* In cookie mode the request is deliberately not queued: all the
	 * state the cookie needs is encoded in the sequence number.
	 */
	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
1314
/*
 * Create the child (soon to be established) socket for an accepted
 * connection request.  Handles both native IPv6 and v6-mapped IPv4
 * requests; in the mapped case the child is built by the IPv4 code
 * and then fitted with mapped addresses and the mapped op vectors.
 * Returns the new socket, or NULL on failure (accept-queue overflow,
 * route lookup failure or allocation failure).
 */
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		/* Present the IPv4 endpoints as ::ffff:a.b.c.d addresses. */
		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		/* All further processing goes through the IPv4 paths. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	/* No cached route from the request: do the lookup now, honouring
	 * any routing header in the listener's IPv6 options.
	 */
	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.mark = sk->sk_mark;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk, NULL);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
1521
/*
 * Checksum handling on receive.  Returns non-zero if the packet is
 * provably corrupt.  Short packets (<= 76 bytes) are fully verified
 * right here; longer ones keep the complemented pseudo-header sum in
 * skb->csum so the full check can be completed later (NOTE(review):
 * presumably on the copy-to-user path — confirm against tcp input).
 */
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		/* Hardware gave us the full sum: verify it immediately. */
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
1541
/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Main receive path once a segment has been matched to a socket:
 * MD5 validation, socket filter, then state-dependent processing
 * (fast path for ESTABLISHED, request handling for LISTEN, generic
 * state machine otherwise).  Returns 0 in all cases; a bad segment
 * is answered with a RST and discarded.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if(nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			/* Latch this skb as the most recent pktoptions;
			 * the previous one (if any) falls out of xchg()
			 * and is freed below.
			 */
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1675
/*
 * tcp_v6_rcv() - protocol-layer entry point for inbound IPv6 TCP segments.
 *
 * Validates the TCP header, fills in the skb's TCP control block, looks up
 * the owning socket, and hands the segment over: directly via
 * tcp_v6_do_rcv(), through the prequeue, or onto the socket backlog when
 * the socket is currently locked by user context.  Segments that resolve
 * to a TIME_WAIT mini-socket are handled under the do_time_wait label.
 * Returns 0 when the segment was consumed, -1 when tcp_v6_do_rcv()
 * reported an error.
 */
1676 static int tcp_v6_rcv(struct sk_buff *skb)
1677 {
1678         struct tcphdr *th;
1679         struct sock *sk;
1680         int ret;
1681         struct net *net = dev_net(skb->dev);
1682
1683         if (skb->pkt_type != PACKET_HOST)
1684                 goto discard_it;
1685
1686         /*
1687          *      Count it even if it's bad.
1688          */
1689         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1690
1691         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1692                 goto discard_it;
1693
1694         th = tcp_hdr(skb);
1695
             /* doff counts 32-bit words; smaller than the base header is
              * malformed. */
1696         if (th->doff < sizeof(struct tcphdr)/4)
1697                 goto bad_packet;
1698         if (!pskb_may_pull(skb, th->doff*4))
1699                 goto discard_it;
1700
1701         if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1702                 goto bad_packet;
1703
             /* pskb_may_pull() may have re-linearized the skb; reload the
              * header pointer before filling in the control block. */
1704         th = tcp_hdr(skb);
1705         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
             /* SYN and FIN each occupy one unit of sequence space. */
1706         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1707                                     skb->len - th->doff*4);
1708         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1709         TCP_SKB_CB(skb)->when = 0;
1710         TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1711         TCP_SKB_CB(skb)->sacked = 0;
1712
1713         sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1714         if (!sk)
1715                 goto no_tcp_socket;
1716
1717 process:
1718         if (sk->sk_state == TCP_TIME_WAIT)
1719                 goto do_time_wait;
1720
1721         if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1722                 goto discard_and_relse;
1723
1724         if (sk_filter(sk, skb))
1725                 goto discard_and_relse;
1726
1727         skb->dev = NULL;
1728
1729         bh_lock_sock_nested(sk);
1730         ret = 0;
1731         if (!sock_owned_by_user(sk)) {
1732 #ifdef CONFIG_NET_DMA
1733                 struct tcp_sock *tp = tcp_sk(sk);
1734                 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1735                         tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1736                 if (tp->ucopy.dma_chan)
1737                         ret = tcp_v6_do_rcv(sk, skb);
1738                 else
1739 #endif
1740                 {
1741                         if (!tcp_prequeue(sk, skb))
1742                                 ret = tcp_v6_do_rcv(sk, skb);
1743                 }
             /* Socket locked by user context: queue on the backlog, or
              * drop and account when the backlog is full. */
1744         } else if (unlikely(sk_add_backlog(sk, skb))) {
1745                 bh_unlock_sock(sk);
1746                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1747                 goto discard_and_relse;
1748         }
1749         bh_unlock_sock(sk);
1750
1751         sock_put(sk);
1752         return ret ? -1 : 0;
1753
1754 no_tcp_socket:
1755         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1756                 goto discard_it;
1757
1758         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1759 bad_packet:
1760                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1761         } else {
1762                 tcp_v6_send_reset(NULL, skb);
1763         }
1764
1765 discard_it:
1766
1767         /*
1768          *      Discard frame
1769          */
1770
1771         kfree_skb(skb);
1772         return 0;
1773
1774 discard_and_relse:
1775         sock_put(sk);
1776         goto discard_it;
1777
1778 do_time_wait:
1779         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1780                 inet_twsk_put(inet_twsk(sk));
1781                 goto discard_it;
1782         }
1783
1784         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1785                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1786                 inet_twsk_put(inet_twsk(sk));
1787                 goto discard_it;
1788         }
1789
1790         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
             /* A new SYN hit a TIME_WAIT socket: if a matching listener
              * exists, kill the timewait sock and reprocess the segment
              * against the listener. */
1791         case TCP_TW_SYN:
1792         {
1793                 struct sock *sk2;
1794
1795                 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1796                                             &ipv6_hdr(skb)->daddr,
1797                                             ntohs(th->dest), inet6_iif(skb));
1798                 if (sk2 != NULL) {
1799                         struct inet_timewait_sock *tw = inet_twsk(sk);
1800                         inet_twsk_deschedule(tw, &tcp_death_row);
1801                         inet_twsk_put(tw);
1802                         sk = sk2;
1803                         goto process;
1804                 }
1805                 /* Fall through to ACK */
1806         }
1807         case TCP_TW_ACK:
1808                 tcp_v6_timewait_ack(sk, skb);
1809                 break;
1810         case TCP_TW_RST:
1811                 goto no_tcp_socket;
1812         case TCP_TW_SUCCESS:;
1813         }
1814         goto discard_it;
1815 }
1816
/*
 * Stub for the .remember_stamp af_ops hook: timestamp caching in the
 * destination metrics is not implemented for IPv6 yet, so always report
 * that nothing was stored.
 */
1817 static int tcp_v6_remember_stamp(struct sock *sk)
1818 {
1819         /* Alas, not yet... */
1820         return 0;
1821 }
1822
/*
 * AF-specific connection-socket operations installed as
 * icsk->icsk_af_ops for native IPv6 TCP sockets.
 */
1823 static const struct inet_connection_sock_af_ops ipv6_specific = {
1824         .queue_xmit        = inet6_csk_xmit,
1825         .send_check        = tcp_v6_send_check,
1826         .rebuild_header    = inet6_sk_rebuild_header,
1827         .conn_request      = tcp_v6_conn_request,
1828         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1829         .remember_stamp    = tcp_v6_remember_stamp,
1830         .net_header_len    = sizeof(struct ipv6hdr),
1831         .setsockopt        = ipv6_setsockopt,
1832         .getsockopt        = ipv6_getsockopt,
1833         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1834         .sockaddr_len      = sizeof(struct sockaddr_in6),
1835         .bind_conflict     = inet6_csk_bind_conflict,
1836 #ifdef CONFIG_COMPAT
1837         .compat_setsockopt = compat_ipv6_setsockopt,
1838         .compat_getsockopt = compat_ipv6_getsockopt,
1839 #endif
1840 };
1841
1842 #ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 (RFC 2385) operations for native IPv6 sockets. */
1843 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1844         .md5_lookup     =       tcp_v6_md5_lookup,
1845         .calc_md5_hash  =       tcp_v6_md5_hash_skb,
1846         .md5_add        =       tcp_v6_md5_add_func,
1847         .md5_parse      =       tcp_v6_parse_md5_keys,
1848 };
1849 #endif
1850
1851 /*
1852  *      TCP over IPv4 via INET6 API
1853  */
1854
/*
 * af_ops variant for v4-mapped sockets: an AF_INET6 socket that is
 * actually talking IPv4.  Transmit/header handling comes from the IPv4
 * stack while the sockopt/sockaddr surface stays IPv6.
 */
1855 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1856         .queue_xmit        = ip_queue_xmit,
1857         .send_check        = tcp_v4_send_check,
1858         .rebuild_header    = inet_sk_rebuild_header,
1859         .conn_request      = tcp_v6_conn_request,
1860         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1861         .remember_stamp    = tcp_v4_remember_stamp,
1862         .net_header_len    = sizeof(struct iphdr),
1863         .setsockopt        = ipv6_setsockopt,
1864         .getsockopt        = ipv6_getsockopt,
1865         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1866         .sockaddr_len      = sizeof(struct sockaddr_in6),
1867         .bind_conflict     = inet6_csk_bind_conflict,
1868 #ifdef CONFIG_COMPAT
1869         .compat_setsockopt = compat_ipv6_setsockopt,
1870         .compat_getsockopt = compat_ipv6_getsockopt,
1871 #endif
1872 };
1873
1874 #ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 operations for v4-mapped sockets: hash over the IPv4 header. */
1875 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1876         .md5_lookup     =       tcp_v4_md5_lookup,
1877         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1878         .md5_add        =       tcp_v6_md5_add_func,
1879         .md5_parse      =       tcp_v6_parse_md5_keys,
1880 };
1881 #endif
1882
1883 /* NOTE: A lot of things set to zero explicitly by call to
1884  *       sk_alloc() so need not be done here.
1885  */
/*
 * tcp_v6_init_sock() - proto .init callback for new AF_INET6 TCP sockets.
 *
 * Sets up queues, timers, congestion-control defaults and the IPv6
 * af_ops, then accounts the socket.  Returns 0 (cannot fail; a failed
 * cookie_values allocation simply leaves TCP cookie transactions off
 * for this socket).
 */
1886 static int tcp_v6_init_sock(struct sock *sk)
1887 {
1888         struct inet_connection_sock *icsk = inet_csk(sk);
1889         struct tcp_sock *tp = tcp_sk(sk);
1890
1891         skb_queue_head_init(&tp->out_of_order_queue);
1892         tcp_init_xmit_timers(sk);
1893         tcp_prequeue_init(tp);
1894
1895         icsk->icsk_rto = TCP_TIMEOUT_INIT;
1896         tp->mdev = TCP_TIMEOUT_INIT;
1897
1898         /* So many TCP implementations out there (incorrectly) count the
1899          * initial SYN frame in their delayed-ACK and congestion control
1900          * algorithms that we must have the following bandaid to talk
1901          * efficiently to them.  -DaveM
1902          */
1903         tp->snd_cwnd = 2;
1904
1905         /* See draft-stevens-tcpca-spec-01 for discussion of the
1906          * initialization of these values.
1907          */
1908         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1909         tp->snd_cwnd_clamp = ~0;
1910         tp->mss_cache = TCP_MSS_DEFAULT;
1911
1912         tp->reordering = sysctl_tcp_reordering;
1913
1914         sk->sk_state = TCP_CLOSE;
1915
             /* Start with native IPv6 ops; switched to ipv6_mapped later
              * if the socket connects to a v4-mapped address. */
1916         icsk->icsk_af_ops = &ipv6_specific;
1917         icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1918         icsk->icsk_sync_mss = tcp_sync_mss;
1919         sk->sk_write_space = sk_stream_write_space;
1920         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1921
1922 #ifdef CONFIG_TCP_MD5SIG
1923         tp->af_specific = &tcp_sock_ipv6_specific;
1924 #endif
1925
1926         /* TCP Cookie Transactions */
1927         if (sysctl_tcp_cookie_size > 0) {
1928                 /* Default, cookies without s_data_payload. */
1929                 tp->cookie_values =
1930                         kzalloc(sizeof(*tp->cookie_values),
1931                                 sk->sk_allocation);
1932                 if (tp->cookie_values != NULL)
1933                         kref_init(&tp->cookie_values->kref);
1934         }
1935         /* Presumed zeroed, in order of appearance:
1936          *      cookie_in_always, cookie_out_never,
1937          *      s_data_constant, s_data_in, s_data_out
1938          */
1939         sk->sk_sndbuf = sysctl_tcp_wmem[1];
1940         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1941
1942         local_bh_disable();
1943         percpu_counter_inc(&tcp_sockets_allocated);
1944         local_bh_enable();
1945
1946         return 0;
1947 }
1948
/*
 * tcp_v6_destroy_sock() - proto .destroy callback.  Releases MD5 keys
 * (if configured), then runs the shared IPv4 TCP destructor followed by
 * the generic IPv6 socket teardown.
 */
1949 static void tcp_v6_destroy_sock(struct sock *sk)
1950 {
1951 #ifdef CONFIG_TCP_MD5SIG
1952         /* Clean up the MD5 key list */
1953         if (tcp_sk(sk)->md5sig_info)
1954                 tcp_v6_clear_md5_list(sk);
1955 #endif
1956         tcp_v4_destroy_sock(sk);
1957         inet6_destroy_sock(sk);
1958 }
1959
1960 #ifdef CONFIG_PROC_FS
1961 /* Proc filesystem TCPv6 sock list dumping. */
/*
 * get_openreq6() - format one SYN_RECV open request as a /proc/net/tcp6
 * row.  The column layout is ABI: userspace tools parse it, so the
 * format string must not change.
 */
1962 static void get_openreq6(struct seq_file *seq,
1963                          struct sock *sk, struct request_sock *req, int i, int uid)
1964 {
             /* Remaining lifetime of the request, clamped to zero. */
1965         int ttd = req->expires - jiffies;
1966         struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1967         struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1968
1969         if (ttd < 0)
1970                 ttd = 0;
1971
1972         seq_printf(seq,
1973                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1974                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1975                    i,
1976                    src->s6_addr32[0], src->s6_addr32[1],
1977                    src->s6_addr32[2], src->s6_addr32[3],
1978                    ntohs(inet_rsk(req)->loc_port),
1979                    dest->s6_addr32[0], dest->s6_addr32[1],
1980                    dest->s6_addr32[2], dest->s6_addr32[3],
1981                    ntohs(inet_rsk(req)->rmt_port),
1982                    TCP_SYN_RECV,
1983                    0,0, /* could print option size, but that is af dependent. */
1984                    1,   /* timers active (only the expire timer) */
1985                    jiffies_to_clock_t(ttd),
1986                    req->retrans,
1987                    uid,
1988                    0,  /* non standard timer */
1989                    0, /* open_requests have no inode */
1990                    0, req);
1991 }
1992
/*
 * get_tcp6_sock() - format one full TCP socket (listening or
 * established) as a /proc/net/tcp6 row.  Column layout is parsed by
 * userspace; do not change the format string.
 */
1993 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1994 {
1995         struct in6_addr *dest, *src;
1996         __u16 destp, srcp;
1997         int timer_active;
1998         unsigned long timer_expires;
1999         struct inet_sock *inet = inet_sk(sp);
2000         struct tcp_sock *tp = tcp_sk(sp);
2001         const struct inet_connection_sock *icsk = inet_csk(sp);
2002         struct ipv6_pinfo *np = inet6_sk(sp);
2003
2004         dest  = &np->daddr;
2005         src   = &np->rcv_saddr;
2006         destp = ntohs(inet->inet_dport);
2007         srcp  = ntohs(inet->inet_sport);
2008
             /* Encode which timer is pending: 1 = retransmit, 4 = zero
              * window probe, 2 = keepalive, 0 = none. */
2009         if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2010                 timer_active    = 1;
2011                 timer_expires   = icsk->icsk_timeout;
2012         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2013                 timer_active    = 4;
2014                 timer_expires   = icsk->icsk_timeout;
2015         } else if (timer_pending(&sp->sk_timer)) {
2016                 timer_active    = 2;
2017                 timer_expires   = sp->sk_timer.expires;
2018         } else {
2019                 timer_active    = 0;
2020                 timer_expires = jiffies;
2021         }
2022
2023         seq_printf(seq,
2024                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2025                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
2026                    i,
2027                    src->s6_addr32[0], src->s6_addr32[1],
2028                    src->s6_addr32[2], src->s6_addr32[3], srcp,
2029                    dest->s6_addr32[0], dest->s6_addr32[1],
2030                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
2031                    sp->sk_state,
2032                    tp->write_seq-tp->snd_una,
                    /* rx_queue column: backlog length for listeners,
                     * unread byte count otherwise. */
2033                    (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2034                    timer_active,
2035                    jiffies_to_clock_t(timer_expires - jiffies),
2036                    icsk->icsk_retransmits,
2037                    sock_i_uid(sp),
2038                    icsk->icsk_probes_out,
2039                    sock_i_ino(sp),
2040                    atomic_read(&sp->sk_refcnt), sp,
2041                    jiffies_to_clock_t(icsk->icsk_rto),
2042                    jiffies_to_clock_t(icsk->icsk_ack.ato),
2043                    (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2044                    tp->snd_cwnd,
2045                    tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
2046                    );
2047 }
2048
/*
 * get_timewait6_sock() - format one TIME_WAIT mini-socket as a
 * /proc/net/tcp6 row.  Most columns are fixed zeros since a timewait
 * sock carries no queue/uid/inode state; timer code 3 marks the
 * timewait timer.
 */
2049 static void get_timewait6_sock(struct seq_file *seq,
2050                                struct inet_timewait_sock *tw, int i)
2051 {
2052         struct in6_addr *dest, *src;
2053         __u16 destp, srcp;
2054         struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
             /* Remaining time-wait lifetime, clamped to zero. */
2055         int ttd = tw->tw_ttd - jiffies;
2056
2057         if (ttd < 0)
2058                 ttd = 0;
2059
2060         dest = &tw6->tw_v6_daddr;
2061         src  = &tw6->tw_v6_rcv_saddr;
2062         destp = ntohs(tw->tw_dport);
2063         srcp  = ntohs(tw->tw_sport);
2064
2065         seq_printf(seq,
2066                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2067                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2068                    i,
2069                    src->s6_addr32[0], src->s6_addr32[1],
2070                    src->s6_addr32[2], src->s6_addr32[3], srcp,
2071                    dest->s6_addr32[0], dest->s6_addr32[1],
2072                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
2073                    tw->tw_substate, 0, 0,
2074                    3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2075                    atomic_read(&tw->tw_refcnt), tw);
2076 }
2077
2078 static int tcp6_seq_show(struct seq_file *seq, void *v)
2079 {
2080         struct tcp_iter_state *st;
2081
2082         if (v == SEQ_START_TOKEN) {
2083                 seq_puts(seq,
2084                          "  sl  "
2085                          "local_address                         "
2086                          "remote_address                        "
2087                          "st tx_queue rx_queue tr tm->when retrnsmt"
2088                          "   uid  timeout inode\n");
2089                 goto out;
2090         }
2091         st = seq->private;
2092
2093         switch (st->state) {
2094         case TCP_SEQ_STATE_LISTENING:
2095         case TCP_SEQ_STATE_ESTABLISHED:
2096                 get_tcp6_sock(seq, v, st->num);
2097                 break;
2098         case TCP_SEQ_STATE_OPENREQ:
2099                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2100                 break;
2101         case TCP_SEQ_STATE_TIME_WAIT:
2102                 get_timewait6_sock(seq, v, st->num);
2103                 break;
2104         }
2105 out:
2106         return 0;
2107 }
2108
/*
 * seq_file registration info for /proc/net/tcp6; the generic TCP proc
 * code fills in the remaining fops/seq_ops callbacks.
 */
2109 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2110         .name           = "tcp6",
2111         .family         = AF_INET6,
2112         .seq_fops       = {
2113                 .owner          = THIS_MODULE,
2114         },
2115         .seq_ops        = {
2116                 .show           = tcp6_seq_show,
2117         },
2118 };
2119
2120 int __net_init tcp6_proc_init(struct net *net)
2121 {
2122         return tcp_proc_register(net, &tcp6_seq_afinfo);
2123 }
2124
/* Remove the per-namespace /proc/net/tcp6 entry. */
2125 void tcp6_proc_exit(struct net *net)
2126 {
2127         tcp_proc_unregister(net, &tcp6_seq_afinfo);
2128 }
2129 #endif
2130
/*
 * struct proto for AF_INET6/SOCK_STREAM sockets.  Most operations are
 * shared with IPv4 TCP; only the v6-specific hooks (init, destroy,
 * backlog_rcv, hash) differ.  Memory/sysctl knobs are the common TCP
 * ones.
 */
2131 struct proto tcpv6_prot = {
2132         .name                   = "TCPv6",
2133         .owner                  = THIS_MODULE,
2134         .close                  = tcp_close,
2135         .connect                = tcp_v6_connect,
2136         .disconnect             = tcp_disconnect,
2137         .accept                 = inet_csk_accept,
2138         .ioctl                  = tcp_ioctl,
2139         .init                   = tcp_v6_init_sock,
2140         .destroy                = tcp_v6_destroy_sock,
2141         .shutdown               = tcp_shutdown,
2142         .setsockopt             = tcp_setsockopt,
2143         .getsockopt             = tcp_getsockopt,
2144         .recvmsg                = tcp_recvmsg,
2145         .backlog_rcv            = tcp_v6_do_rcv,
2146         .hash                   = tcp_v6_hash,
2147         .unhash                 = inet_unhash,
2148         .get_port               = inet_csk_get_port,
2149         .enter_memory_pressure  = tcp_enter_memory_pressure,
2150         .sockets_allocated      = &tcp_sockets_allocated,
2151         .memory_allocated       = &tcp_memory_allocated,
2152         .memory_pressure        = &tcp_memory_pressure,
2153         .orphan_count           = &tcp_orphan_count,
2154         .sysctl_mem             = sysctl_tcp_mem,
2155         .sysctl_wmem            = sysctl_tcp_wmem,
2156         .sysctl_rmem            = sysctl_tcp_rmem,
2157         .max_header             = MAX_TCP_HEADER,
2158         .obj_size               = sizeof(struct tcp6_sock),
2159         .slab_flags             = SLAB_DESTROY_BY_RCU,
2160         .twsk_prot              = &tcp6_timewait_sock_ops,
2161         .rsk_prot               = &tcp6_request_sock_ops,
2162         .h.hashinfo             = &tcp_hashinfo,
2163 #ifdef CONFIG_COMPAT
2164         .compat_setsockopt      = compat_tcp_setsockopt,
2165         .compat_getsockopt      = compat_tcp_getsockopt,
2166 #endif
2167 };
2168
/*
 * inet6 protocol hooks for IPPROTO_TCP: receive/error entry points plus
 * GSO/GRO offload handlers.
 */
2169 static const struct inet6_protocol tcpv6_protocol = {
2170         .handler        =       tcp_v6_rcv,
2171         .err_handler    =       tcp_v6_err,
2172         .gso_send_check =       tcp_v6_gso_send_check,
2173         .gso_segment    =       tcp_tso_segment,
2174         .gro_receive    =       tcp6_gro_receive,
2175         .gro_complete   =       tcp6_gro_complete,
2176         .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2177 };
2178
/*
 * protosw entry that maps socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP)
 * onto tcpv6_prot / inet6_stream_ops.
 */
2179 static struct inet_protosw tcpv6_protosw = {
2180         .type           =       SOCK_STREAM,
2181         .protocol       =       IPPROTO_TCP,
2182         .prot           =       &tcpv6_prot,
2183         .ops            =       &inet6_stream_ops,
2184         .no_check       =       0,
2185         .flags          =       INET_PROTOSW_PERMANENT |
2186                                 INET_PROTOSW_ICSK,
2187 };
2188
/*
 * Per-namespace init: create the kernel control socket used e.g. for
 * sending resets on behalf of the namespace.
 */
2189 static int __net_init tcpv6_net_init(struct net *net)
2190 {
2191         return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2192                                     SOCK_RAW, IPPROTO_TCP, net);
2193 }
2194
/* Per-namespace exit: destroy the namespace's TCP control socket. */
2195 static void __net_exit tcpv6_net_exit(struct net *net)
2196 {
2197         inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2198 }
2199
/*
 * Batched exit: purge all IPv6 timewait sockets belonging to the dying
 * namespaces in one hash-table walk.
 */
2200 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2201 {
2202         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2203 }
2204
/* Per-network-namespace lifecycle hooks for IPv6 TCP. */
2205 static struct pernet_operations tcpv6_net_ops = {
2206         .init       = tcpv6_net_init,
2207         .exit       = tcpv6_net_exit,
2208         .exit_batch = tcpv6_net_exit_batch,
2209 };
2210
2211 int __init tcpv6_init(void)
2212 {
2213         int ret;
2214
2215         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2216         if (ret)
2217                 goto out;
2218
2219         /* register inet6 protocol */
2220         ret = inet6_register_protosw(&tcpv6_protosw);
2221         if (ret)
2222                 goto out_tcpv6_protocol;
2223
2224         ret = register_pernet_subsys(&tcpv6_net_ops);
2225         if (ret)
2226                 goto out_tcpv6_protosw;
2227 out:
2228         return ret;
2229
2230 out_tcpv6_protocol:
2231         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2232 out_tcpv6_protosw:
2233         inet6_unregister_protosw(&tcpv6_protosw);
2234         goto out;
2235 }
2236
/*
 * tcpv6_exit() - unregister IPv6 TCP, in strict reverse order of the
 * registrations performed by tcpv6_init().
 */
2237 void tcpv6_exit(void)
2238 {
2239         unregister_pernet_subsys(&tcpv6_net_ops);
2240         inet6_unregister_protosw(&tcpv6_protosw);
2241         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2242 }