/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/config.h>

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
int sysctl_tcp_tw_reuse;
int sysctl_tcp_low_latency;

/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

/* Socket used for sending RSTs */
static struct socket *tcp_socket;

void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);

struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};
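
/* Grab a local port for this socket, deferring to the generic
 * connection-socket port allocator with the standard IPv4
 * bind-conflict check.
 */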
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet_csk_bind_conflict);
}

static void tcp_v4_hash(struct sock *sk)
{
	inet_hash(&tcp_hashinfo, sk);
}

void tcp_unhash(struct sock *sk)
{
	inet_unhash(&tcp_hashinfo, sk);
}
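
/* Derive the initial sequence number for a passively opened connection
 * from the secure (hashed) ISN generator, keyed on the incoming SYN's
 * address/port 4-tuple.
 */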
static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	return secure_tcp_sequence_number(skb->nh.iph->daddr,
					  skb->nh.iph->saddr,
					  skb->h.th->dest,
					  skb->h.th->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap, i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's: only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used
	   as state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk);
	if (tmp < 0)
		return tmp;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);

		/* VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it when
		 * trying a new connection.
		 */
		if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/* This unhashes the socket and releases the local port, if necessary. */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * for the case where this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
	__u32 seq;
	int err;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
			 th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		BUG_TRAP(!req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen normally.
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
	 * to be considered hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * lost their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it matched a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct tcphdr rth;
	struct ip_reply_arg arg;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rth, 0, sizeof(struct tcphdr));
	rth.dest   = th->source;
	rth.source = th->dest;
	rth.doff   = sizeof(struct tcphdr) / 4;
	rth.rst    = 1;

	if (th->ack) {
		rth.seq = th->ack_seq;
	} else {
		rth.ack = 1;
		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				    skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof arg);
	arg.iov[0].iov_base = (unsigned char *)&rth;
	arg.iov[0].iov_len  = sizeof rth;
	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th;
	struct {
		struct tcphdr th;
		u32 tsopt[3];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof arg);

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				     (TCPOPT_TIMESTAMP << 8) |
				     TCPOLEN_TIMESTAMP);
		rep.tsopt[1] = htonl(tcp_time_stamp);
		rep.tsopt[2] = htonl(ts);
		arg.iov[0].iov_len = sizeof(rep);
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}
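
/* Answer a segment that hit a TIME_WAIT socket with the ACK the
 * (already destroyed) connection would have sent, using the state
 * kept in the timewait bucket.
 */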
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}
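
/* ACK on behalf of an embryonic connection: the sequence numbers come
 * from the request_sock, since no full socket exists yet.
 */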
static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto out;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v4_check(th, skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		if (err == NET_XMIT_CN)
			err = 0;
	}

out:
	dst_release(dst);
	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
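
/* Rate-limited warning, printed at most once per minute when we start
 * answering with syncookies.
 */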
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(skb->h.th->dest));
	}
}

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

struct request_sock_ops tcp_request_sock_ops = {
	.family		= PF_INET,
	.obj_size	= sizeof(struct tcp_request_sock),
	.rtx_syn_ack	= tcp_v4_send_synack,
	.send_ack	= tcp_v4_reqsk_send_ack,
	.destructor	= tcp_v4_reqsk_destructor,
	.send_reset	= tcp_v4_send_reset,
};

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};
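
/* Process an incoming SYN: allocate and initialize a request_sock,
 * choose (or cookie-encode) the ISN, reply with a SYN-ACK, and park
 * the request in the listener's SYN queue.
 */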
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__u32 saddr = skb->nh.iph->saddr;
	__u32 daddr = skb->nh.iph->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer SYNs sent to broadcast or multicast addresses. */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitation: they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * warm entries in the syn queue, drop the request. It is better than
	 * clogging the syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie) {
		tcp_clear_options(&tmp_opt);
		tmp_opt.saw_tstamp = 0;
	}

	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes do not send their timestamp in the SYN.
		 * It is an easy case: we simply do not advertise
		 * TS support.
		 */
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok  = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);
	if (!want_cookie)
		TCP_ECN_create_request(req, skb->h.th);

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				dst_release(dst);
				goto drop_and_free;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive. It means that we continue to
			 * communicate with destinations already
			 * remembered at the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
				       "request from %u.%u.%u.%u/%u\n",
				       NIPQUAD(saddr),
				       ntohs(skb->h.th->source));
			dst_release(dst);
			goto drop_and_free;
		}

		isn = tcp_v4_init_sequence(sk, skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, req, dst))
		goto drop_and_free;

	if (want_cookie) {
		reqsk_free(req);
	} else {
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
	return 0;
}

/*
 * The three way handshake has completed - we got a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = skb->nh.iph->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	__inet_hash(&tcp_hashinfo, newsk, 0);
	__inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
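
/* For a segment that arrived on a listening socket, find who should
 * really handle it: a pending request_sock, an already established
 * child, or the listener itself (possibly via a syncookie check).
 */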
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct iphdr *iph = skb->nh.iph;
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
					th->source, skb->nh.iph->daddr,
					ntohs(th->dest), inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
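
/* Validate or set up the checksum of an incoming segment; short
 * packets are verified immediately, longer ones are left for
 * deferred checking.
 */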
static int tcp_v4_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
				  skb->nh.iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v4_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = skb->nh.iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
			   skb->nh.iph->daddr, ntohs(th->dest),
			   inet_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *) sk);
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *) sk);
		goto discard_it;
	}
	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
							skb->nh.iph->daddr,
							ntohs(th->dest),
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule((struct inet_timewait_sock *)sk,
					     &tcp_death_row);
			inet_twsk_put((struct inet_timewait_sock *)sk);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

/* VJ's idea. Save the last timestamp seen from this destination
 * and hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections, before they
 * enter synchronized state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
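
/* AF_INET operations used by the protocol-independent
 * inet_connection_sock code.
 */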
struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	= ip_queue_xmit,
	.send_check	= tcp_v4_send_check,
	.rebuild_header	= inet_sk_rebuild_header,
	.conn_request	= tcp_v4_conn_request,
	.syn_recv_sock	= tcp_v4_syn_recv_sock,
	.remember_stamp	= tcp_v4_remember_stamp,
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ip_setsockopt,
	.getsockopt	= ip_getsockopt,
	.addr2sockaddr	= inet_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in),
};

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
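
/* Final per-socket cleanup: timers, queues, congestion control state,
 * the bind bucket and the cached sendmsg page.
 */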
int tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	sk_stream_writequeue_purge(sk);

	/* Clean up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

	/* Clean the prequeue; it really must be empty. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(&tcp_hashinfo, sk);

	/*
	 * If a sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	atomic_dec(&tcp_sockets_allocated);

	return 0;
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL :
	       list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return tw->tw_node.next ?
		hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
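
/* Iterator over the listening hash; for each listener it also walks
 * the SYN queue (open requests) under syn_wait_lock.
 */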
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_node *node;
	struct sock *sk = cur;
	struct tcp_iter_state* st = seq->private;

	if (!sk) {
		st->bucket = 0;
		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
		goto get_sk;
	}

	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= TCP_SYNQ_HSIZE)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_for_each_from(sk, node) {
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	if (++st->bucket < INET_LHTABLE_SIZE) {
		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
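
/* Find the first socket of our family in the established hash; the
 * second half of the table holds the TIME_WAIT buckets.
 */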
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state* st = seq->private;
	void *rc = NULL;

	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
		struct sock *sk;
		struct hlist_node *node;
		struct inet_timewait_sock *tw;

		/* We can reschedule _before_ having picked the target: */
		cond_resched_softirq();

		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
			if (tw->tw_family != st->family) {
				continue;
			}
			rc = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	struct tcp_iter_state* st = seq->private;

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && tw->tw_family != st->family) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* We can reschedule between buckets: */
		cond_resched_softirq();

		if (++st->bucket < tcp_hashinfo.ehash_size) {
			read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
			sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
		} else {
			cur = NULL;
			goto out;
		}
	} else
		sk = sk_next(sk);

	sk_for_each_from(sk, node) {
		if (sk->sk_family == st->family)
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state* st = seq->private;

	inet_listen_lock(&tcp_hashinfo);
	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		inet_listen_unlock(&tcp_hashinfo);
		local_bh_disable();
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state* st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state* st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			inet_listen_unlock(&tcp_hashinfo);
			local_bh_disable();
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			inet_listen_unlock(&tcp_hashinfo);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		local_bh_enable();
		break;
	}
}

static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	struct tcp_iter_state *s;
	int rc;

	if (unlikely(afinfo == NULL))
		return -EINVAL;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	memset(s, 0, sizeof(*s));
	s->family		= afinfo->family;
	s->seq_ops.start	= tcp_seq_start;
	s->seq_ops.next		= tcp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= tcp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;
	seq	     = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= tcp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
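
/* Format one open request (SYN_RECV) as a /proc/net/tcp line. */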
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 char *tmpbuf, int i, int uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
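
/* Format one full socket as a /proc/net/tcp line, including the state
 * of its retransmit/probe/keepalive timer.
 */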
static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct inet_sock *inet = inet_sk(sp);
	unsigned int dest = inet->daddr;
	unsigned int src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %u %u %u %u %d",
		i, src, srcp, dest, destp, sp->sk_state,
		tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sp),
		icsk->icsk_probes_out,
		sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		icsk->icsk_rto,
		icsk->icsk_ack.ato,
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
}
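
/* Format one TIME_WAIT socket as a /proc/net/tcp line. */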
static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
{
	unsigned int dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st;
	char tmpbuf[TMPSZ + 1];

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, tmpbuf, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, tmpbuf, st->num);
		break;
	}
	seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
out:
	return 0;
}

static struct file_operations tcp4_seq_fops;
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp",
	.family		= AF_INET,
	.seq_show	= tcp4_seq_show,
	.seq_fops	= &tcp4_seq_fops,
};

int __init tcp4_proc_init(void)
{
	return tcp_proc_register(&tcp4_seq_afinfo);
}

void tcp4_proc_exit(void)
{
	tcp_proc_unregister(&tcp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= tcp_v4_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v4_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
};

void __init tcp_v4_init(struct net_proto_family *ops)
{
	int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
	if (err < 0)
		panic("Failed to create the TCP control socket.\n");
	tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
	inet_sk(tcp_socket->sk)->uc_ttl = -1;

	/* Unhash it so that IP input processing does not even
	 * see it; we do not wish this socket to see incoming
	 * packets.
	 */
	tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
}

EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(inet_bind_bucket_create);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_local_port_range);
EXPORT_SYMBOL(sysctl_tcp_low_latency);
EXPORT_SYMBOL(sysctl_tcp_tw_reuse);