3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
28 #include <linux/module.h>
29 #include <linux/config.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/socket.h>
33 #include <linux/sockios.h>
34 #include <linux/net.h>
35 #include <linux/jiffies.h>
37 #include <linux/in6.h>
38 #include <linux/netdevice.h>
39 #include <linux/init.h>
40 #include <linux/jhash.h>
41 #include <linux/ipsec.h>
42 #include <linux/times.h>
44 #include <linux/ipv6.h>
45 #include <linux/icmpv6.h>
46 #include <linux/random.h>
49 #include <net/ndisc.h>
50 #include <net/inet6_hashtables.h>
51 #include <net/inet6_connection_sock.h>
53 #include <net/transp_v6.h>
54 #include <net/addrconf.h>
55 #include <net/ip6_route.h>
56 #include <net/ip6_checksum.h>
57 #include <net/inet_ecn.h>
58 #include <net/protocol.h>
60 #include <net/addrconf.h>
62 #include <net/dsfield.h>
63 #include <net/timewait_sock.h>
65 #include <asm/uaccess.h>
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
70 static void tcp_v6_send_reset(struct sk_buff *skb);
71 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
72 static void tcp_v6_send_check(struct sock *sk, int len,
75 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
77 static struct inet_connection_sock_af_ops ipv6_mapped;
78 static struct inet_connection_sock_af_ops ipv6_specific;
/* Bind @sk to local port @snum (0 = pick an ephemeral port) using the
 * shared inet bind hash, with the IPv6-aware bind-conflict checker so
 * v4-mapped and v6 sockets on the same port are handled correctly.
 * NOTE(review): source is elided here; intermediate lines are missing. */
80 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
82 return inet_csk_get_port(&tcp_hashinfo, sk, snum,
83 inet6_csk_bind_conflict);
/* Insert @sk into the TCP established/listening hash tables unless it is
 * closed.  A socket using the v4-mapped ops set is presumably hashed via
 * the IPv4 path in the elided branch — confirm against full source. */
86 static void tcp_v6_hash(struct sock *sk)
88 if (sk->sk_state != TCP_CLOSE) {
89 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
94 __inet6_hash(&tcp_hashinfo, sk);
/* Compute the TCP checksum over an IPv6 pseudo-header: @saddr/@daddr,
 * segment length @len, protocol TCP, folded with partial sum @base. */
99 static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
100 struct in6_addr *saddr,
101 struct in6_addr *daddr,
104 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
/* Pick a secure initial sequence number for a new connection, keyed on
 * the addresses/ports in @skb.  Dispatches on skb->protocol: native
 * IPv6 headers use the v6 ISN generator, otherwise (v4-mapped) the
 * IPv4 one.  Elided lines carry the remaining arguments. */
107 static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
109 if (skb->protocol == htons(ETH_P_IPV6)) {
110 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
111 skb->nh.ipv6h->saddr.s6_addr32,
115 return secure_tcp_sequence_number(skb->nh.iph->daddr,
/* Check whether the (saddr, daddr, sport @lport, dport) 4-tuple of @sk
 * collides with an existing established or TIME-WAIT socket in the
 * ehash bucket.  On success the socket is added to the established
 * chain under the bucket lock; a reusable TIME-WAIT socket (per
 * twsk_unique()) may be recycled (descheduled) instead of failing.
 * Returns 0 on success, -EADDRNOTAVAIL if the tuple is taken.
 * @twp: if non-NULL, receives the TIME-WAIT socket to be killed by the
 *       caller — TODO confirm against the elided lines.
 * NOTE(review): source is elided; declarations of sk2 and several
 * branch bodies are missing here. */
122 static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
123 struct inet_timewait_sock **twp)
125 struct inet_sock *inet = inet_sk(sk);
126 const struct ipv6_pinfo *np = inet6_sk(sk);
127 const struct in6_addr *daddr = &np->rcv_saddr;
128 const struct in6_addr *saddr = &np->daddr;
129 const int dif = sk->sk_bound_dev_if;
130 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
131 unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport);
132 struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
134 const struct hlist_node *node;
135 struct inet_timewait_sock *tw;
137 prefetch(head->chain.first);
138 write_lock(&head->lock);
140 /* Check TIME-WAIT sockets first. */
/* TIME-WAIT chains live at head + ehash_size in this kernel's layout. */
141 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
142 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2);
146 if(*((__u32 *)&(tw->tw_dport)) == ports &&
147 sk2->sk_family == PF_INET6 &&
148 ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) &&
149 ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) &&
150 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
151 if (twsk_unique(sk, sk2, twp))
159 /* And established part... */
160 sk_for_each(sk2, node, &head->chain) {
161 if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif))
/* No collision: hash the socket in while still holding the lock. */
166 BUG_TRAP(sk_unhashed(sk));
167 __sk_add_node(sk, &head->chain);
169 sock_prot_inc_use(sk->sk_prot);
170 write_unlock(&head->lock);
174 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
176 /* Silly. Should hash-dance instead... */
177 inet_twsk_deschedule(tw, &tcp_death_row);
178 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
185 write_unlock(&head->lock);
186 return -EADDRNOTAVAIL;
/* Per-connection secret offset into the ephemeral port range, derived
 * from the socket's v6 addresses (remaining arguments are elided). */
189 static inline u32 tcpv6_port_offset(const struct sock *sk)
191 const struct inet_sock *inet = inet_sk(sk);
192 const struct ipv6_pinfo *np = inet6_sk(sk);
194 return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32,
/* Allocate a local port (if not already bound) and insert the
 * connecting socket into the bind and established hash tables.
 * If snum == 0, walks the sysctl local port range starting at a
 * per-connection secret offset, taking each bind bucket lock and
 * rejecting tuples that __tcp_v6_check_established() says collide.
 * If the socket is already bound, rechecks uniqueness against the
 * established table.  Returns 0 on success, -EADDRNOTAVAIL if the
 * whole range is exhausted.
 * NOTE(review): source is elided; several branch bodies, labels and
 * the static 'hint' declaration are missing. */
199 static int tcp_v6_hash_connect(struct sock *sk)
201 unsigned short snum = inet_sk(sk)->num;
202 struct inet_bind_hashbucket *head;
203 struct inet_bind_bucket *tb;
207 int low = sysctl_local_port_range[0];
208 int high = sysctl_local_port_range[1];
209 int range = high - low;
213 u32 offset = hint + tcpv6_port_offset(sk);
214 struct hlist_node *node;
215 struct inet_timewait_sock *tw = NULL;
218 for (i = 1; i <= range; i++) {
219 port = low + (i + offset) % range;
220 head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
221 spin_lock(&head->lock);
223 /* Does not bother with rcv_saddr checks,
224 * because the established check is already
/* Reuse an existing bind bucket for this port if one exists. */
227 inet_bind_bucket_for_each(tb, node, &head->chain) {
228 if (tb->port == port) {
229 BUG_TRAP(!hlist_empty(&tb->owners));
230 if (tb->fastreuse >= 0)
232 if (!__tcp_v6_check_established(sk,
240 tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
242 spin_unlock(&head->lock);
249 spin_unlock(&head->lock);
253 return -EADDRNOTAVAIL;
258 /* Head lock still held and bh's disabled */
259 inet_bind_hash(sk, tb, port);
260 if (sk_unhashed(sk)) {
261 inet_sk(sk)->sport = htons(port);
262 __inet6_hash(&tcp_hashinfo, sk);
264 spin_unlock(&head->lock);
/* A recyclable TIME-WAIT socket found earlier is killed here. */
267 inet_twsk_deschedule(tw, &tcp_death_row);
/* Already-bound path: fast case when we are the sole owner of the
 * bind bucket, otherwise fall back to the full established check. */
275 head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
276 tb = inet_csk(sk)->icsk_bind_hash;
277 spin_lock_bh(&head->lock);
279 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
280 __inet6_hash(&tcp_hashinfo, sk);
281 spin_unlock_bh(&head->lock);
284 spin_unlock(&head->lock);
285 /* No definite answer... Walk to established hash table */
286 ret = __tcp_v6_check_established(sk, snum, NULL);
/* Active-open (connect()) entry point for IPv6 TCP sockets.
 * Validates the sockaddr_in6, resolves flow labels and scope ids,
 * maps in6addr_any to loopback (BSD'ism), hands v4-mapped
 * destinations off to tcp_v4_connect() with the ipv6_mapped ops,
 * performs the route/xfrm lookup, binds a local port via
 * tcp_v6_hash_connect(), picks a secure ISN and finally calls
 * tcp_connect() to send the SYN.  Returns 0 or a negative errno.
 * NOTE(review): source is elided; error labels, some assignments
 * and several closing braces are missing. */
293 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
296 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
297 struct inet_sock *inet = inet_sk(sk);
298 struct ipv6_pinfo *np = inet6_sk(sk);
299 struct tcp_sock *tp = tcp_sk(sk);
300 struct in6_addr *saddr = NULL, *final_p = NULL, final;
302 struct dst_entry *dst;
/* Basic argument validation. */
306 if (addr_len < SIN6_LEN_RFC2133)
309 if (usin->sin6_family != AF_INET6)
310 return(-EAFNOSUPPORT);
312 memset(&fl, 0, sizeof(fl));
/* Resolve a non-zero flow label against labels owned by this socket. */
315 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
316 IP6_ECN_flow_init(fl.fl6_flowlabel);
317 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
318 struct ip6_flowlabel *flowlabel;
319 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
320 if (flowlabel == NULL)
322 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
323 fl6_sock_release(flowlabel);
328 * connect() to INADDR_ANY means loopback (BSD'ism).
331 if(ipv6_addr_any(&usin->sin6_addr))
332 usin->sin6_addr.s6_addr[15] = 0x1;
334 addr_type = ipv6_addr_type(&usin->sin6_addr);
336 if(addr_type & IPV6_ADDR_MULTICAST)
/* Link-local destinations need a usable interface (scope id). */
339 if (addr_type&IPV6_ADDR_LINKLOCAL) {
340 if (addr_len >= sizeof(struct sockaddr_in6) &&
341 usin->sin6_scope_id) {
342 /* If interface is set while binding, indices
345 if (sk->sk_bound_dev_if &&
346 sk->sk_bound_dev_if != usin->sin6_scope_id)
349 sk->sk_bound_dev_if = usin->sin6_scope_id;
352 /* Connect to link-local address requires an interface */
353 if (!sk->sk_bound_dev_if)
/* Reset cached timestamps when reconnecting to a different peer. */
357 if (tp->rx_opt.ts_recent_stamp &&
358 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
359 tp->rx_opt.ts_recent = 0;
360 tp->rx_opt.ts_recent_stamp = 0;
364 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
365 np->flow_label = fl.fl6_flowlabel;
/* v4-mapped destination: delegate to the IPv4 connect path, swapping
 * in the ipv6_mapped ops; on failure the v6 ops are restored. */
371 if (addr_type == IPV6_ADDR_MAPPED) {
372 u32 exthdrlen = tp->ext_header_len;
373 struct sockaddr_in sin;
375 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
377 if (__ipv6_only_sock(sk))
380 sin.sin_family = AF_INET;
381 sin.sin_port = usin->sin6_port;
382 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
384 inet_csk(sk)->icsk_af_ops = &ipv6_mapped;
385 sk->sk_backlog_rcv = tcp_v4_do_rcv;
387 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
390 tp->ext_header_len = exthdrlen;
391 inet_csk(sk)->icsk_af_ops = &ipv6_specific;
392 sk->sk_backlog_rcv = tcp_v6_do_rcv;
395 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
397 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
404 if (!ipv6_addr_any(&np->rcv_saddr))
405 saddr = &np->rcv_saddr;
/* Build the flow and resolve the route (honouring a type-0 routing
 * header's first hop, restoring the real destination afterwards). */
407 fl.proto = IPPROTO_TCP;
408 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
409 ipv6_addr_copy(&fl.fl6_src,
410 (saddr ? saddr : &np->saddr));
411 fl.oif = sk->sk_bound_dev_if;
412 fl.fl_ip_dport = usin->sin6_port;
413 fl.fl_ip_sport = inet->sport;
415 if (np->opt && np->opt->srcrt) {
416 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
417 ipv6_addr_copy(&final, &fl.fl6_dst);
418 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
422 err = ip6_dst_lookup(sk, &dst, &fl);
426 ipv6_addr_copy(&fl.fl6_dst, final_p);
428 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
433 ipv6_addr_copy(&np->rcv_saddr, saddr);
436 /* set the source address */
437 ipv6_addr_copy(&np->saddr, saddr);
438 inet->rcv_saddr = LOOPBACK4_IPV6;
440 ip6_dst_store(sk, dst, NULL);
441 sk->sk_route_caps = dst->dev->features &
442 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
444 tp->ext_header_len = 0;
446 tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
448 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
450 inet->dport = usin->sin6_port;
452 tcp_set_state(sk, TCP_SYN_SENT);
453 err = tcp_v6_hash_connect(sk);
458 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
463 err = tcp_connect(sk);
/* Elided failure path: tear state back down to CLOSE. */
470 tcp_set_state(sk, TCP_CLOSE);
474 sk->sk_route_caps = 0;
/* ICMPv6 error handler for TCP.  Looks up the socket for the embedded
 * TCP header, validates that the quoted sequence number is in window,
 * handles PKT_TOOBIG by re-resolving the route and synchronizing the
 * MSS, and otherwise converts the ICMPv6 type/code to an errno which
 * is reported to the socket (hard error if np->recverr and not
 * user-locked, else soft).  SYN_SENT/SYN_RECV errors may refer to a
 * pending request_sock, which is dropped.
 * NOTE(review): source is elided; lock/unlock calls, several case
 * labels and closing braces are missing. */
478 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
479 int type, int code, int offset, __u32 info)
481 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
482 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
483 struct ipv6_pinfo *np;
489 sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
490 th->source, skb->dev->ifindex);
493 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
497 if (sk->sk_state == TCP_TIME_WAIT) {
498 inet_twsk_put((struct inet_timewait_sock *)sk);
503 if (sock_owned_by_user(sk))
504 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
506 if (sk->sk_state == TCP_CLOSE)
/* Ignore errors that quote a sequence outside the send window. */
510 seq = ntohl(th->seq);
511 if (sk->sk_state != TCP_LISTEN &&
512 !between(seq, tp->snd_una, tp->snd_nxt)) {
513 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
519 if (type == ICMPV6_PKT_TOOBIG) {
520 struct dst_entry *dst = NULL;
522 if (sock_owned_by_user(sk))
524 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
527 /* icmp should have updated the destination cache entry */
528 dst = __sk_dst_check(sk, np->dst_cookie);
531 struct inet_sock *inet = inet_sk(sk);
534 /* BUGGG_FUTURE: Again, it is not clear how
535 to handle rthdr case. Ignore this complexity
538 memset(&fl, 0, sizeof(fl));
539 fl.proto = IPPROTO_TCP;
540 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
541 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
542 fl.oif = sk->sk_bound_dev_if;
543 fl.fl_ip_dport = inet->dport;
544 fl.fl_ip_sport = inet->sport;
546 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
547 sk->sk_err_soft = -err;
551 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
552 sk->sk_err_soft = -err;
/* PMTU shrank below our cached value: sync MSS and retransmit. */
559 if (tp->pmtu_cookie > dst_mtu(dst)) {
560 tcp_sync_mss(sk, dst_mtu(dst));
561 tcp_simple_retransmit(sk);
562 } /* else let the usual retransmit timer handle it */
567 icmpv6_err_convert(type, code, &err);
569 /* Might be for an request_sock */
570 switch (sk->sk_state) {
571 struct request_sock *req, **prev;
573 if (sock_owned_by_user(sk))
576 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
577 &hdr->saddr, inet6_iif(skb));
581 /* ICMPs are not backlogged, hence we cannot get
582 * an established socket here.
584 BUG_TRAP(req->sk == NULL);
586 if (seq != tcp_rsk(req)->snt_isn) {
587 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
591 inet_csk_reqsk_queue_drop(sk, req, prev);
595 case TCP_SYN_RECV: /* Cannot happen.
596 It can, it SYNs are crossed. --ANK */
597 if (!sock_owned_by_user(sk)) {
598 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
600 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
604 sk->sk_err_soft = err;
608 if (!sock_owned_by_user(sk) && np->recverr) {
610 sk->sk_error_report(sk);
612 sk->sk_err_soft = err;
620 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
621 struct dst_entry *dst)
623 struct inet6_request_sock *treq = inet6_rsk(req);
624 struct ipv6_pinfo *np = inet6_sk(sk);
625 struct sk_buff * skb;
626 struct ipv6_txoptions *opt = NULL;
627 struct in6_addr * final_p = NULL, final;
631 memset(&fl, 0, sizeof(fl));
632 fl.proto = IPPROTO_TCP;
633 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
634 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
635 fl.fl6_flowlabel = 0;
637 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
638 fl.fl_ip_sport = inet_sk(sk)->sport;
643 np->rxopt.bits.osrcrt == 2 &&
645 struct sk_buff *pktopts = treq->pktopts;
646 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
648 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
651 if (opt && opt->srcrt) {
652 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
653 ipv6_addr_copy(&final, &fl.fl6_dst);
654 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
658 err = ip6_dst_lookup(sk, &dst, &fl);
662 ipv6_addr_copy(&fl.fl6_dst, final_p);
663 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
667 skb = tcp_make_synack(sk, dst, req);
669 struct tcphdr *th = skb->h.th;
671 th->check = tcp_v6_check(th, skb->len,
672 &treq->loc_addr, &treq->rmt_addr,
673 csum_partial((char *)th, skb->len, skb->csum));
675 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
676 err = ip6_xmit(sk, skb, &fl, opt, 0);
677 if (err == NET_XMIT_CN)
682 if (opt && opt != np->opt)
683 sock_kfree_s(sk, opt, opt->tot_len);
/* request_sock destructor: drop the SYN packet-options skb, if any. */
687 static void tcp_v6_reqsk_destructor(struct request_sock *req)
689 if (inet6_rsk(req)->pktopts)
690 kfree_skb(inet6_rsk(req)->pktopts);
/* Ops table for IPv6 TCP connection requests (SYN-ACK retransmit,
 * ACK/RST generation, destruction). */
693 static struct request_sock_ops tcp6_request_sock_ops = {
695 .obj_size = sizeof(struct tcp6_request_sock),
696 .rtx_syn_ack = tcp_v6_send_synack,
697 .send_ack = tcp_v6_reqsk_send_ack,
698 .destructor = tcp_v6_reqsk_destructor,
699 .send_reset = tcp_v6_send_reset
/* TIME-WAIT ops: object size and the uniqueness check used when a
 * connecting socket wants to recycle a TIME-WAIT tuple. */
702 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
703 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
704 .twsk_unique = tcp_twsk_unique,
/* Fill in the TCP checksum for an outgoing segment of length @len.
 * With hardware checksum offload (CHECKSUM_HW) only the pseudo-header
 * sum is stored and skb->csum records where the device must write the
 * final checksum; otherwise the full checksum is computed in software
 * (the tail of the csum_partial call is on an elided line). */
707 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
709 struct ipv6_pinfo *np = inet6_sk(sk);
710 struct tcphdr *th = skb->h.th;
712 if (skb->ip_summed == CHECKSUM_HW) {
713 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
714 skb->csum = offsetof(struct tcphdr, check);
716 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
717 csum_partial((char *)th, th->doff<<2,
/* Send a RST in response to @skb without an owning socket.  Only for
 * unicast destinations.  Builds a minimal TCP header with swapped
 * ports, seq taken from the incoming ACK (or an ACK covering the
 * offending segment — the branch selecting between the two is elided),
 * resolves a route from the reversed addresses and transmits.
 * NOTE(review): source is elided; flowi declaration, RST/ACK flag
 * assignments and cleanup are missing. */
723 static void tcp_v6_send_reset(struct sk_buff *skb)
725 struct tcphdr *th = skb->h.th, *t1;
726 struct sk_buff *buff;
732 if (!ipv6_unicast_destination(skb))
736 * We need to grab some memory, and put together an RST,
737 * and then put it into the queue to be sent.
740 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
745 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
747 t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
749 /* Swap the send and the receive. */
750 memset(t1, 0, sizeof(*t1));
751 t1->dest = th->source;
752 t1->source = th->dest;
753 t1->doff = sizeof(*t1)/4;
757 t1->seq = th->ack_seq;
760 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
761 + skb->len - (th->doff<<2));
764 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
766 memset(&fl, 0, sizeof(fl));
767 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
768 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
770 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
771 sizeof(*t1), IPPROTO_TCP,
774 fl.proto = IPPROTO_TCP;
775 fl.oif = inet6_iif(skb);
776 fl.fl_ip_dport = t1->dest;
777 fl.fl_ip_sport = t1->source;
779 /* sk = NULL, but it is safe for now. RST socket required. */
780 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
782 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
783 ip6_xmit(NULL, buff, &fl, NULL, 0);
784 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
785 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
/* Send a bare ACK (used for TIME-WAIT and request_sock ACKs) with the
 * given @seq/@ack/@win and, when @ts is non-zero, a timestamp option
 * (tot_len presumably grows by TCPOLEN_TSTAMP_ALIGNED on an elided
 * line — confirm against full source).  Route and transmit mirror
 * tcp_v6_send_reset(). */
793 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
795 struct tcphdr *th = skb->h.th, *t1;
796 struct sk_buff *buff;
798 int tot_len = sizeof(struct tcphdr);
803 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
808 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
810 t1 = (struct tcphdr *) skb_push(buff,tot_len);
812 /* Swap the send and the receive. */
813 memset(t1, 0, sizeof(*t1));
814 t1->dest = th->source;
815 t1->source = th->dest;
816 t1->doff = tot_len/4;
817 t1->seq = htonl(seq);
818 t1->ack_seq = htonl(ack);
820 t1->window = htons(win);
/* Append TCP timestamp option: two NOPs + kind/length, val, ecr. */
823 u32 *ptr = (u32*)(t1 + 1);
824 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
825 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
826 *ptr++ = htonl(tcp_time_stamp);
830 buff->csum = csum_partial((char *)t1, tot_len, 0);
832 memset(&fl, 0, sizeof(fl));
833 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
834 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
836 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
837 tot_len, IPPROTO_TCP,
840 fl.proto = IPPROTO_TCP;
841 fl.oif = inet6_iif(skb);
842 fl.fl_ip_dport = t1->dest;
843 fl.fl_ip_sport = t1->source;
845 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
846 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
847 ip6_xmit(NULL, buff, &fl, NULL, 0);
848 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
/* ACK a segment arriving for a TIME-WAIT socket, using the state
 * snapshotted into the timewait sock (snd_nxt, rcv_nxt, scaled
 * receive window, last timestamp). */
856 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
858 struct inet_timewait_sock *tw = inet_twsk(sk);
859 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
861 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
862 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
863 tcptw->tw_ts_recent);
/* ACK on behalf of a pending connection request: snt_isn+1 / rcv_isn+1
 * with the request's advertised window and timestamp. */
868 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
870 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
/* For a segment hitting a LISTEN socket: find a matching pending
 * request (-> tcp_check_req) or an already-established child socket.
 * A TIME-WAIT hit is released.  SYN-cookie handling is compiled out
 * ("#if 0").  Returns the socket to process the segment on (the
 * fall-through return of @sk itself is on an elided line). */
874 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
876 struct request_sock *req, **prev;
877 const struct tcphdr *th = skb->h.th;
880 /* Find possible connection requests. */
881 req = inet6_csk_search_req(sk, &prev, th->source,
882 &skb->nh.ipv6h->saddr,
883 &skb->nh.ipv6h->daddr, inet6_iif(skb));
885 return tcp_check_req(sk, skb, req, prev);
887 nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
888 th->source, &skb->nh.ipv6h->daddr,
889 ntohs(th->dest), inet6_iif(skb));
892 if (nsk->sk_state != TCP_TIME_WAIT) {
896 inet_twsk_put((struct inet_timewait_sock *)nsk);
900 #if 0 /*def CONFIG_SYN_COOKIES*/
901 if (!th->rst && !th->syn && th->ack)
902 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
907 /* FIXME: this is substantially similar to the ipv4 code.
908 * Can some kind of merge be done? -- erics
/* Handle an incoming SYN on a LISTEN socket: allocate and initialize a
 * request_sock (options, addresses, ECN, incoming interface), retain
 * the SYN skb when the user asked for packet options, pick an ISN,
 * send the SYN-ACK and queue the request with the SYN-ACK timer.
 * v4-mapped SYNs are delegated to tcp_v4_conn_request().  Returns 0
 * (never sends a reset itself).
 * NOTE(review): source is elided; accept-queue overflow drops and the
 * treq->pktopts assignment inside the rxopt branch are missing. */
910 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
912 struct inet6_request_sock *treq;
913 struct ipv6_pinfo *np = inet6_sk(sk);
914 struct tcp_options_received tmp_opt;
915 struct tcp_sock *tp = tcp_sk(sk);
916 struct request_sock *req = NULL;
917 __u32 isn = TCP_SKB_CB(skb)->when;
919 if (skb->protocol == htons(ETH_P_IP))
920 return tcp_v4_conn_request(sk, skb);
922 if (!ipv6_unicast_destination(skb))
926 * There are no SYN attacks on IPv6, yet...
928 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
930 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
934 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
937 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
941 tcp_clear_options(&tmp_opt);
942 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
943 tmp_opt.user_mss = tp->rx_opt.user_mss;
945 tcp_parse_options(skb, &tmp_opt, 0);
947 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
948 tcp_openreq_init(req, &tmp_opt, skb);
950 treq = inet6_rsk(req);
951 ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
952 ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
953 TCP_ECN_create_request(req, skb->h.th);
954 treq->pktopts = NULL;
/* Keep the SYN skb if the user requested any of the rx options. */
955 if (ipv6_opt_accepted(sk, skb) ||
956 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
957 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
958 atomic_inc(&skb->users);
961 treq->iif = sk->sk_bound_dev_if;
963 /* So that link locals have meaning */
964 if (!sk->sk_bound_dev_if &&
965 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
966 treq->iif = inet6_iif(skb);
969 isn = tcp_v6_init_sequence(sk,skb);
971 tcp_rsk(req)->snt_isn = isn;
973 if (tcp_v6_send_synack(sk, req, NULL))
976 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
983 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
984 return 0; /* don't send reset */
/* Create the child socket when the final ACK of the handshake arrives.
 * v4-mapped path: delegates to tcp_v4_syn_recv_sock() and then grafts
 * v4-mapped IPv6 addresses and the ipv6_mapped ops onto the child.
 * Native path: resolves the route (optionally via an inverted routing
 * header), clones the child with tcp_create_openreq_child(), copies
 * addresses and options from the request, clones SYN pktoptions,
 * duplicates the listener's IPv6 txoptions, syncs MSS and hashes the
 * child in.  Returns the new socket or NULL on failure.
 * NOTE(review): source is elided; NULL checks, overflow/drop labels
 * and the final return are missing. */
987 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
988 struct request_sock *req,
989 struct dst_entry *dst)
991 struct inet6_request_sock *treq = inet6_rsk(req);
992 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
993 struct tcp6_sock *newtcp6sk;
994 struct inet_sock *newinet;
995 struct tcp_sock *newtp;
997 struct ipv6_txoptions *opt;
999 if (skb->protocol == htons(ETH_P_IP)) {
1004 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1009 newtcp6sk = (struct tcp6_sock *)newsk;
1010 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1012 newinet = inet_sk(newsk);
1013 newnp = inet6_sk(newsk);
1014 newtp = tcp_sk(newsk);
1016 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
/* Synthesize ::ffff:a.b.c.d addresses from the IPv4 child. */
1018 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1021 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1024 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1026 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1027 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1028 newnp->pktoptions = NULL;
1030 newnp->mcast_oif = inet6_iif(skb);
1031 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1034 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1035 * here, tcp_create_openreq_child now does this for us, see the comment in
1036 * that function for the gory details. -acme
1039 /* It is tricky place. Until this moment IPv4 tcp
1040 worked with IPv6 icsk.icsk_af_ops.
1043 tcp_sync_mss(newsk, newtp->pmtu_cookie);
/* Native IPv6 path starts here. */
1050 if (sk_acceptq_is_full(sk))
1053 if (np->rxopt.bits.osrcrt == 2 &&
1054 opt == NULL && treq->pktopts) {
1055 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
1057 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
1061 struct in6_addr *final_p = NULL, final;
1064 memset(&fl, 0, sizeof(fl));
1065 fl.proto = IPPROTO_TCP;
1066 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1067 if (opt && opt->srcrt) {
1068 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1069 ipv6_addr_copy(&final, &fl.fl6_dst);
1070 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1073 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1074 fl.oif = sk->sk_bound_dev_if;
1075 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1076 fl.fl_ip_sport = inet_sk(sk)->sport;
1078 if (ip6_dst_lookup(sk, &dst, &fl))
1082 ipv6_addr_copy(&fl.fl6_dst, final_p);
1084 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1088 newsk = tcp_create_openreq_child(sk, req, skb);
1093 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1094 * count here, tcp_create_openreq_child now does this for us, see the
1095 * comment in that function for the gory details. -acme
1098 ip6_dst_store(newsk, dst, NULL);
1099 newsk->sk_route_caps = dst->dev->features &
1100 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1102 newtcp6sk = (struct tcp6_sock *)newsk;
1103 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1105 newtp = tcp_sk(newsk);
1106 newinet = inet_sk(newsk);
1107 newnp = inet6_sk(newsk);
1109 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1111 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1112 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1113 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1114 newsk->sk_bound_dev_if = treq->iif;
1116 /* Now IPv6 options...
1118 First: no IPv4 options.
1120 newinet->opt = NULL;
1123 newnp->rxopt.all = np->rxopt.all;
1125 /* Clone pktoptions received with SYN */
1126 newnp->pktoptions = NULL;
1127 if (treq->pktopts != NULL) {
1128 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1129 kfree_skb(treq->pktopts);
1130 treq->pktopts = NULL;
1131 if (newnp->pktoptions)
1132 skb_set_owner_r(newnp->pktoptions, newsk);
1135 newnp->mcast_oif = inet6_iif(skb);
1136 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1138 /* Clone native IPv6 options from listening socket (if any)
1140 Yes, keeping reference count would be much more clever,
1141 but we make one more one thing there: reattach optmem
1145 newnp->opt = ipv6_dup_options(newsk, opt);
1147 sock_kfree_s(sk, opt, opt->tot_len);
1150 newtp->ext_header_len = 0;
1152 newtp->ext_header_len = newnp->opt->opt_nflen +
1153 newnp->opt->opt_flen;
1155 tcp_sync_mss(newsk, dst_mtu(dst));
1156 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1157 tcp_initialize_rcv_mss(newsk);
1159 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1161 __inet6_hash(&tcp_hashinfo, newsk);
1162 inet_inherit_port(&tcp_hashinfo, sk, newsk);
/* Elided failure labels: overflow/drop accounting and opt cleanup. */
1167 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1169 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1170 if (opt && opt != np->opt)
1171 sock_kfree_s(sk, opt, opt->tot_len);
/* Validate/prepare the receive checksum.  If hardware already summed
 * the packet and it verifies, mark CHECKSUM_UNNECESSARY; otherwise
 * seed skb->csum with the pseudo-header sum and, for short packets
 * (<= 76 bytes), verify in full immediately. */
1176 static int tcp_v6_checksum_init(struct sk_buff *skb)
1178 if (skb->ip_summed == CHECKSUM_HW) {
1179 if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1180 &skb->nh.ipv6h->daddr,skb->csum)) {
1181 skb->ip_summed = CHECKSUM_UNNECESSARY;
1186 skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1187 &skb->nh.ipv6h->daddr, 0);
1189 if (skb->len <= 76) {
1190 return __skb_checksum_complete(skb);
1195 /* The socket must have it's spinlock held when we get
1198 * We have a potential double-lock case here, so even when
1199 * doing backlog processing we use the BH locking scheme.
1200 * This is because we cannot sleep with the original spinlock
/* Main per-socket receive path (called with the socket locked).
 * Dispatches v4-mapped traffic to tcp_v4_do_rcv(), applies socket
 * filters, then runs the established fast path or the generic state
 * machine, finally latching IPV6_PKTOPTIONS from the last in-order
 * segment.  Bad segments are RST'd / counted on elided labels. */
1203 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1205 struct ipv6_pinfo *np = inet6_sk(sk);
1206 struct tcp_sock *tp;
1207 struct sk_buff *opt_skb = NULL;
1209 /* Imagine: socket is IPv6. IPv4 packet arrives,
1210 goes to IPv4 receive handler and backlogged.
1211 From backlog it always goes here. Kerboom...
1212 Fortunately, tcp_rcv_established and rcv_established
1213 handle them correctly, but it is not case with
1214 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1217 if (skb->protocol == htons(ETH_P_IP))
1218 return tcp_v4_do_rcv(sk, skb);
1220 if (sk_filter(sk, skb, 0))
1224 * socket locking is here for SMP purposes as backlog rcv
1225 * is currently called with bh processing disabled.
1228 /* Do Stevens' IPV6_PKTOPTIONS.
1230 Yes, guys, it is the only place in our code, where we
1231 may make it not affecting IPv4.
1232 The rest of code is protocol independent,
1233 and I do not like idea to uglify IPv4.
1235 Actually, all the idea behind IPV6_PKTOPTIONS
1236 looks not very well thought. For now we latch
1237 options, received in the last packet, enqueued
1238 by tcp. Feel free to propose better solution.
1242 opt_skb = skb_clone(skb, GFP_ATOMIC);
1244 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1245 TCP_CHECK_TIMER(sk);
1246 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1248 TCP_CHECK_TIMER(sk);
1250 goto ipv6_pktoptions;
/* Slow path: validate length/checksum first. */
1254 if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1257 if (sk->sk_state == TCP_LISTEN) {
1258 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1263 * Queue it on the new socket if the new socket is active,
1264 * otherwise we just shortcircuit this and continue with
1268 if (tcp_child_process(sk, nsk, skb))
1271 __kfree_skb(opt_skb);
1276 TCP_CHECK_TIMER(sk);
1277 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1279 TCP_CHECK_TIMER(sk);
1281 goto ipv6_pktoptions;
1285 tcp_v6_send_reset(skb);
1288 __kfree_skb(opt_skb);
1292 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1297 /* Do you ask, what is it?
1299 1. skb was enqueued by tcp.
1300 2. skb is added to tail of read queue, rather than out of order.
1301 3. socket is not in passive state.
1302 4. Finally, it really contains options, which user wants to receive.
1305 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1306 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1307 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1308 np->mcast_oif = inet6_iif(opt_skb);
1309 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1310 np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
1311 if (ipv6_opt_accepted(sk, opt_skb)) {
1312 skb_set_owner_r(opt_skb, sk);
1313 opt_skb = xchg(&np->pktoptions, opt_skb);
1315 __kfree_skb(opt_skb);
1316 opt_skb = xchg(&np->pktoptions, NULL);
/* Protocol entry point for incoming IPv6 TCP packets.  Validates the
 * header, fills TCP_SKB_CB, looks up the owning socket, runs xfrm
 * policy and socket filters, then either processes directly, via
 * prequeue, or via the socket backlog.  Segments with no socket are
 * checksum-verified and RST'd; TIME-WAIT segments are dispatched by
 * tcp_timewait_state_process(), possibly re-targeting a new listener.
 * NOTE(review): source is elided; bad_packet/discard labels, the
 * bh_lock, and several case labels are missing. */
1325 static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
1327 struct sk_buff *skb = *pskb;
1332 if (skb->pkt_type != PACKET_HOST)
1336 * Count it even if it's bad.
1338 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1340 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1345 if (th->doff < sizeof(struct tcphdr)/4)
1347 if (!pskb_may_pull(skb, th->doff*4))
1350 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1351 tcp_v6_checksum_init(skb)))
1355 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1356 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1357 skb->len - th->doff*4);
1358 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1359 TCP_SKB_CB(skb)->when = 0;
1360 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
1361 TCP_SKB_CB(skb)->sacked = 0;
1363 sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
1364 &skb->nh.ipv6h->daddr, ntohs(th->dest),
1371 if (sk->sk_state == TCP_TIME_WAIT)
1374 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1375 goto discard_and_relse;
1377 if (sk_filter(sk, skb, 0))
1378 goto discard_and_relse;
1384 if (!sock_owned_by_user(sk)) {
1385 if (!tcp_prequeue(sk, skb))
1386 ret = tcp_v6_do_rcv(sk, skb);
1388 sk_add_backlog(sk, skb);
1392 return ret ? -1 : 0;
/* No-socket path: policy check, verify checksum, send RST. */
1395 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1398 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1400 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1402 tcp_v6_send_reset(skb);
/* TIME-WAIT path. */
1419 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1420 inet_twsk_put((struct inet_timewait_sock *)sk);
1424 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1425 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1426 inet_twsk_put((struct inet_timewait_sock *)sk);
1430 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
/* A fresh SYN in TIME-WAIT may be redirected to a current listener. */
1436 sk2 = inet6_lookup_listener(&tcp_hashinfo,
1437 &skb->nh.ipv6h->daddr,
1438 ntohs(th->dest), inet6_iif(skb));
1440 struct inet_timewait_sock *tw = inet_twsk(sk);
1441 inet_twsk_deschedule(tw, &tcp_death_row);
1446 /* Fall through to ACK */
1449 tcp_v6_timewait_ack(sk, skb);
1453 case TCP_TW_SUCCESS:;
/* Peer timestamp caching is not implemented for IPv6 in this kernel;
 * the (elided) body presumably just returns 0 — confirm upstream. */
1458 static int tcp_v6_remember_stamp(struct sock *sk)
1460 /* Alas, not yet... */
/* Address-family ops for native IPv6 TCP sockets. */
1464 static struct inet_connection_sock_af_ops ipv6_specific = {
1465 .queue_xmit = inet6_csk_xmit,
1466 .send_check = tcp_v6_send_check,
1467 .rebuild_header = inet6_sk_rebuild_header,
1468 .conn_request = tcp_v6_conn_request,
1469 .syn_recv_sock = tcp_v6_syn_recv_sock,
1470 .remember_stamp = tcp_v6_remember_stamp,
1471 .net_header_len = sizeof(struct ipv6hdr),
1473 .setsockopt = ipv6_setsockopt,
1474 .getsockopt = ipv6_getsockopt,
1475 .addr2sockaddr = inet6_csk_addr2sockaddr,
1476 .sockaddr_len = sizeof(struct sockaddr_in6)
1480 * TCP over IPv4 via INET6 API
/* AF-specific ops for an AF_INET6 socket actually talking IPv4
 * (v4-mapped peer address): transmit and header handling use the
 * IPv4 paths, while setsockopt/getsockopt and sockaddr conversion
 * stay IPv6 so the socket API remains consistent for the caller. */
static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	=	ip_queue_xmit,		/* IPv4 output path */
	.send_check	=	tcp_v4_send_check,
	.rebuild_header	=	inet_sk_rebuild_header,
	.conn_request	=	tcp_v6_conn_request,
	.syn_recv_sock	=	tcp_v6_syn_recv_sock,
	.remember_stamp	=	tcp_v4_remember_stamp,	/* real v4 implementation */
	.net_header_len	=	sizeof(struct iphdr),	/* v4 header, not v6 */
	.setsockopt	=	ipv6_setsockopt,
	.getsockopt	=	ipv6_getsockopt,
	.addr2sockaddr	=	inet6_csk_addr2sockaddr,
	.sockaddr_len	=	sizeof(struct sockaddr_in6)
1500 /* NOTE: A lot of things set to zero explicitly by call to
1501 * sk_alloc() so need not be done here.
/* proto ->init hook: initialize TCP state for a newly created
 * AF_INET6 stream socket.  Mirrors tcp_v4_init_sock; sk_alloc()
 * has already zeroed most fields, so only non-zero defaults are
 * set here. */
static int tcp_v6_init_sock(struct sock *sk)
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	/* Initial RTO and mean deviation before any RTT sample exists. */
	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them. -DaveM
	 */
	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* "infinite" slow-start threshold */
	tp->snd_cwnd_clamp = ~0;	/* no cwnd clamp until learned */
	tp->mss_cache = 536;		/* RFC 1122 default MSS */

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	/* Assume native IPv6; swapped to ipv6_mapped if the peer is v4-mapped. */
	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	/* Default buffer sizes come from the TCP sysctls (middle value). */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);
/* proto ->destroy hook: tear down TCP state via the shared IPv4
 * destructor, then release IPv6-specific socket state. */
static int tcp_v6_destroy_sock(struct sock *sk)
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
1552 /* Proc filesystem TCPv6 sock list dumping. */
/* Format one SYN_RECV open request as a /proc/net/tcp6 row.
 * @sk:  listening socket owning the request
 * @req: the pending request_sock
 * @i:   row number printed in the first column
 * @uid: owner uid of the listener
 * Layout must stay in lockstep with get_tcp6_sock()/get_timewait6_sock()
 * since userspace parses these rows positionally. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
	/* Remaining time until the request expires, in jiffies. */
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   0,0, /* could print option size, but that is af dependent. */
		   1, /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   0, /* non standard timer */
		   0, /* open_requests have no inode */
/* Format one established/listening IPv6 TCP socket as a
 * /proc/net/tcp6 row: addresses, ports, state, queue sizes,
 * pending-timer info, and congestion-control snapshot. */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
	struct in6_addr *dest, *src;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	src = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp = ntohs(inet->sport);

	/* Pick whichever timer is currently armed: retransmit,
	 * zero-window probe, or the keepalive/sk_timer; otherwise
	 * report "now" so the printed delta is zero. */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_expires = sp->sk_timer.expires;
		timer_expires = jiffies;

		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   /* tx_queue / rx_queue column values */
		   tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   icsk->icsk_probes_out,
		   atomic_read(&sp->sk_refcnt), sp,
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   /* ssthresh >= 0xFFFF is treated as "not yet set" => -1 */
		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
/* Format one TIME_WAIT IPv6 socket as a /proc/net/tcp6 row.
 * Most live-socket columns (queues, uid, inode, cwnd) are printed
 * as zeros because a timewait sock carries no such state; timer
 * column is hard-coded to 3 ("timewait timer"). */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
	struct in6_addr *dest, *src;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	/* Jiffies until this TIME_WAIT entry is reaped. */
	int ttd = tw->tw_ttd - jiffies;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
1667 #ifdef CONFIG_PROC_FS
/* seq_file ->show callback for /proc/net/tcp6: print the header
 * row for the start token, otherwise dispatch on the iterator
 * state to the matching row formatter. */
static int tcp6_seq_show(struct seq_file *seq, void *v)
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		/* Column header emitted once at the top of the file. */
		   "st tx_queue rx_queue tr tm->when retrnsmt"
		   " uid timeout inode\n");

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
/* fops filled in by tcp_proc_register(); declared here so the
 * afinfo struct below can point at it. */
static struct file_operations tcp6_seq_fops;
/* Registration descriptor for the /proc/net/tcp6 seq_file. */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
/* Create the /proc/net/tcp6 entry; returns the register result
 * (0 on success, negative errno on failure). */
int __init tcp6_proc_init(void)
	return tcp_proc_register(&tcp6_seq_afinfo);
/* Remove the /proc/net/tcp6 entry on module unload. */
void tcp6_proc_exit(void)
	tcp_proc_unregister(&tcp6_seq_afinfo);
/* Transport protocol descriptor for TCP over AF_INET6.  Shares
 * the generic TCP implementation with IPv4 wherever possible;
 * only lookup/init/destroy/receive hooks are IPv6-specific. */
struct proto tcpv6_prot = {
	.owner			= THIS_MODULE,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,	/* run when sock was owned by user */
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	/* Accounting/limits shared with IPv4 TCP (single global pool). */
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
/* inet6 layer-4 handler for IPPROTO_TCP.  NOPOLICY/FINAL: xfrm
 * policy is checked inside tcp_v6_rcv itself, and no further
 * protocol may claim the packet after TCP. */
static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
/* socket() switch entry mapping SOCK_STREAM/IPPROTO_TCP on
 * AF_INET6 to tcpv6_prot; PERMANENT: cannot be unregistered. */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT,
/* Boot-time registration of IPv6 TCP: hook the L4 receive handler
 * and the socket-creation switch entry.
 * NOTE(review): the protosw is registered even when protocol
 * registration failed (only a printk is issued) — verify this
 * best-effort behavior is intended before changing it. */
void __init tcpv6_init(void)
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);