/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;
struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
	/* Short-time timewait calendar */

	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}
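
/*
 * Illustrative sketch, not part of the original file and never compiled:
 * the wrap-safe after()/before() comparators from <net/tcp.h> make
 * tcp_in_window() behave across a 2^32 sequence-number wrap; all the
 * constants below are made up for the example.
 */
#if 0
static void tcp_in_window_example(void)
{
	/* Receive window [s_win, e_win) straddling the wrap point. */
	u32 s_win = 0xfffffff0, e_win = 0x00000010;

	/* A segment overlapping the window is accepted... */
	BUG_ON(!tcp_in_window(0xfffffff8, 0x00000008, s_win, e_win));
	/* ...one lying entirely before it is not. */
	BUG_ON(tcp_in_window(0xffffff00, 0xffffff80, s_win, e_win));
}
#endif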
/*
 * * The main purpose of the TIME-WAIT state is to close a connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) while one
 *   or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. This timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   allow for the loss of one (or more) segments sent by the peer and of
 *   our ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed, and we are allowed to kill the TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
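
/*
 * Illustrative sketch, not part of the original file and never compiled:
 * the "calculated from RTO" timeout mentioned above. When recycling,
 * tcp_time_wait() below schedules the bucket for 3.5 * RTO, computed
 * with shifts only; the 200 ms RTO here is a made-up example value.
 */
#if 0
static unsigned long tw_recycle_timeout_example(void)
{
	unsigned long rto = msecs_to_jiffies(200);

	/* (rto << 2) - (rto >> 1) == 4*rto - rto/2 == 3.5 * rto,
	 * i.e. 700 ms here, versus the fixed 60 s TCP_TIMEWAIT_LEN. */
	return (rto << 2) - (rto >> 1);
}
#endif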
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	struct tcp_options_received tmp_opt;
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tmp_opt.tstamp_ok = 1;
		tcp_parse_options(skb, &tmp_opt, 1, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}
		/* New data or FIN. If new data arrive after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}
		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */
	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
	/*	Out of window segment.

		All such segments are ACKed immediately.

		The only exception is a new SYN. We accept it if it is not
		an old duplicate and we are not in danger of being killed
		by delayed old duplicates. The RFC check (that it carries
		a newer sequence number) works at rates below 40 Mbit/sec.
		However, if PAWS works, it is reliable AND, even more,
		we may even relax the silly seq-space cutoff.

		RED-PEN: we violate the main RFC requirement: if this SYN
		turns out to be an old duplicate (i.e. we receive an RST in
		reply to our SYN-ACK), we must return the socket to the
		time-wait state. That is not good, but not fatal yet.
	 */
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
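
/*
 * Illustrative sketch, not part of the original file and never compiled:
 * a condensed version of how the IPv4 receive path (tcp_v4_rcv())
 * dispatches on the verdicts returned above; labels are simplified
 * for the example.
 */
#if 0
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
		/* acceptable reopen: look up a listener and process there,
		 * else fall through and re-ACK */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;	/* send a reset */
	case TCP_TW_SUCCESS:
		;			/* nothing more to do */
	}
	goto discard_it;
#endif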
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
			tcptw->tw_md5_keylen = 0;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
				tcptw->tw_md5_keylen = key->keylen;
				if (tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up. We've got bigger problems than
		 * non-graceful socket closings.
		 */
		LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
	}
	tcp_update_metrics(sk);
	tcp_done(sk);
}
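
/*
 * Illustrative sketch, not part of the original file and never compiled:
 * the two typical call sites. tcp_fin() enters true TIME-WAIT on
 * receiving a FIN in FIN-WAIT-2, while tcp_close() can park a lingering
 * socket in the "dead" FIN-WAIT-2 substate with its remaining timeout
 * (tmo here is that caller-computed remainder).
 */
#if 0
	tcp_time_wait(sk, TCP_TIME_WAIT, 0);	/* from tcp_fin() */
	tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);	/* from tcp_close() */
#endif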
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_keylen)
		tcp_free_md5sig_pool();
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid a lot of the memory writes here: the tp of the
 * listening socket already contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);
		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
		/* TCP Cookie Transactions require space for the cookie pair,
		 * as it differs for each connection. There is no need to
		 * copy any s_data_payload stored at the original socket.
		 * Failure will prevent resuming the connection.
		 *
		 * Presumed copied, in order of appearance:
		 *	cookie_in_always, cookie_out_never
		 */
		if (oldcvp != NULL) {
			struct tcp_cookie_values *newcvp =
				kzalloc(sizeof(*newtp->cookie_values),
					GFP_ATOMIC);

			if (newcvp != NULL) {
				kref_init(&newcvp->kref);
				newcvp->cookie_desired =
						oldcvp->cookie_desired;
				newtp->cookie_values = newcvp;
			} else {
				/* Not Yet Implemented */
				newtp->cookie_values = NULL;
			}
		}
		/* Now set up tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;
		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;
		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;
		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
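
/*
 * Illustrative sketch, not part of the original file and never compiled:
 * roughly how an af-specific syn_recv_sock implementation (e.g.
 * tcp_v4_syn_recv_sock()) builds on the helper above; error handling
 * and the address/route setup are omitted for the example.
 */
#if 0
	struct sock *newsk = tcp_create_openreq_child(sk, req, skb);

	if (newsk == NULL)
		goto exit;	/* clone failed: treat as listen overflow */
	/* ...then fill in the af-specific parts of the new socket... */
#endif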
/*
 * Process an incoming packet for SYN_RECV sockets represented
 * as a request_sock.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_options_received tmp_opt;
	struct sock *child;

	tmp_opt.saw_tstamp = 0;
	if ((th->doff > (sizeof(struct tcphdr)>>2)) && (req->ts_recent)) {
		tmp_opt.tstamp_ok = 1;
		tcp_parse_options(skb, &tmp_opt, 1, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
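
	/*
	 * Illustrative sketch, not part of the original file and never
	 * compiled: the estimate above. Each SYN-ACK retransmission
	 * doubles the timeout starting from TCP_TIMEOUT_INIT (3 s in this
	 * era), so with req->retrans == 2 the peer's stamp is dated
	 * (3 << 2) == 12 seconds back, a rough upper bound on its true age.
	 */
#if 0
	tmp_opt.ts_recent_stamp = get_seconds() - 12;	/* 3 s << 2 retrans */
#endif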
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong; we cannot believe it and should rely only
		 *  on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}
	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; the only case in which it does not
	   work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK. Otherwise, we create an established connection.
	   Both ends (listening sockets) accept the new incoming connection
	   and try to talk to each other. 8-)

	   Note: this case is both harmless and rare. The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK of a SYNACK
	   both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that this case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */
	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}
	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}
	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set. If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;
	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}
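
	/*
	 * Illustrative sketch, not part of the original file and never
	 * compiled: the userspace knob behind rskq_defer_accept. With
	 * TCP_DEFER_ACCEPT set on a listener, the bare third-handshake
	 * ACK above is dropped and the connection is only surfaced to
	 * accept() once data arrives; listen_fd and the 5 s value are
	 * made up for the example.
	 */
#if 0
	int secs = 5;

	setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
		   &secs, sizeof(secs));
#endif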
	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;
listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
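
/*
 * Illustrative sketch, not part of the original file and never compiled:
 * condensed from the tcp_v4_do_rcv() listen path, where the child socket
 * produced via tcp_check_req() is fed its first segment through
 * tcp_child_process(); error paths are simplified for the example.
 */
#if 0
	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);

		if (nsk && nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;	/* child wants a RST sent */
			return 0;
		}
	}
#endif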
EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);