/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif
/* New-style handling of TIME_WAIT sockets. */

static void inet_twdr_hangman(unsigned long data);
static void inet_twdr_twkill_work(void *data);
static void inet_twdr_twcal_tick(unsigned long data);

int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;
struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= SPIN_LOCK_UNLOCKED,
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work,
					     &tcp_death_row),
/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
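/* For scale (illustrative figures, assuming the conventional
 * TCP_TIMEWAIT_LEN of 60*HZ and INET_TWDR_TWKILL_SLOTS of 8): .period
 * above works out to 7.5 seconds, so the slow wheel sweeps all eight
 * slots in exactly one TIME-WAIT interval.
 */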
EXPORT_SYMBOL_GPL(tcp_death_row);
static void inet_twsk_schedule(struct inet_timewait_sock *tw,
			       struct inet_timewait_death_row *twdr,
			       const int timeo);
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}
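/* Worked example (values purely illustrative): with s_win = 100 and
 * e_win = 150, a segment with seq = 90, end_seq = 110 overlaps the
 * window (end_seq is after s_win, seq is before e_win) and is accepted;
 * a zero-length segment sitting exactly at the left edge
 * (seq = end_seq = 100) is accepted by the first test, while a stale
 * duplicate with seq = end_seq = 90 is rejected.
 */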
/*
 * * Main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed, and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. That means, strictly speaking, that we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	struct tcp_options_received tmp_opt;
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}
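	/* For reference, a sketch of what tcp_paws_check() (in net/tcp.h of
	 * this era) decides, not a definition: reject the segment when its
	 * timestamp is older than ts_recent, i.e.
	 * (s32)(rcv_tsval - ts_recent) < 0, unless ts_recent itself is stale
	 * (older than TCP_PAWS_24DAYS), with a shorter TCP_PAWS_MSL grace
	 * period applied when the segment is an RST.
	 */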
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		/* I am shamed, but failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */
	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment; it can only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
	/*
	   Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is the new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works only at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN later
	   proves to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to time-wait state. It is
	   not good, but not fatal yet.
	 */
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}
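	/* A note on the ISN arithmetic above: tw_snd_nxt + 65535 + 2 lands
	 * above anything the old incarnation could have sent (one maximal
	 * unscaled window past snd_nxt), satisfying requirement (1) quoted
	 * earlier. The isn == 0 fixup exists because, it appears, a zero
	 * TCP_SKB_CB(skb)->when is taken to mean "no ISN supplied" further
	 * down the SYN processing path.
	 */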
	if (paws_reject)
		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tp->af_specific->remember_stamp(sk);
	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
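		/* Shift arithmetic: (rto << 2) - (rto >> 1) is
		 * 4*RTO - RTO/2 = 3.5*RTO, the recycled timewait length
		 * discussed in the rationale comment inside
		 * inet_twsk_schedule() below.
		 */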
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);

			ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

	return ret;
}
static void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		mb();
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
	}
	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
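/* Wheel arithmetic (illustrative): the hangman visits one slot per
 * period, so a full revolution over INET_TWDR_TWKILL_SLOTS slots takes
 * INET_TWDR_TWKILL_SLOTS * period = TCP_TIMEWAIT_LEN, which is how long
 * a bucket parked in the furthest slot waits before it is reaped.
 */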
/* Deliberately left undefined: if the thread_slots bitmask cannot cover
 * INET_TWDR_TWKILL_SLOTS, the call below survives constant folding and
 * this reference fails at link time (a poor man's compile-time assertion).
 */
extern void twkill_slots_invalid(void);

static void inet_twdr_twkill_work(void *data)
{
	struct inet_timewait_death_row *twdr = data;
	int i;

	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
		twkill_slots_invalid();

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
static void inet_twsk_schedule(struct inet_timewait_sock *tw,
			       struct inet_timewait_death_row *twdr,
			       const int timeo)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost, the probability of that
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff. The normal timewait length is calculated
	 * so that we wait at least for one retransmitted FIN (the maximal
	 * RTO is 120sec). [ BTW Linux, following BSD, violates this
	 * requirement by waiting only 60sec; we should wait at least
	 * 240 secs. Well, 240 consumes too many resources 8) ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to the bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
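	/* Rounding example (assuming INET_TWDR_RECYCLE_TICK = 5 purely for
	 * illustration): a calendar tick is then 32 jiffies, and
	 * timeo = 100 jiffies yields slot = (100 + 31) >> 5 = 4,
	 * i.e. ceil(timeo / tick).
	 */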
	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);
	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= TCP_TIMEWAIT_LEN) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = (timeo + twdr->period - 1) / twdr->period;
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
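/* The recycle calendar used above is deliberately short-range: its
 * INET_TWDR_RECYCLE_SLOTS ticks are sized (see inet_timewait_sock.h) to
 * span only the first few seconds, so in practice only recycled
 * (3.5*RTO) timeouts land there; longer timeouts take the
 * slot >= INET_TWDR_RECYCLE_SLOTS branch onto the slow wheel reaped by
 * inet_twdr_hangman().
 */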
static void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
	spin_unlock(&twdr->death_lock);
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		/* Note: must be inet_csk(newsk), not inet_csk(sk); we are
		 * initializing the child's connection state below.
		 */
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp;

		/* Now setup tcp_sock */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;
		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->left_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;
		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;
		newtp->ca_ops = &tcp_reno;

		tcp_set_ca_state(newtp, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->rcv_wup = treq->rcv_isn + 1;
		newtp->write_seq = treq->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = treq->rcv_isn + 1;
		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.eff_sacks = 0;

		newtp->probes_out = 0;
		newtp->rx_opt.num_sacks = 0;
		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));
		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->rx_opt.sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			/* Without window scaling the advertised window is a
			 * bare 16-bit field, so clamp to 65535. */
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
		newtp->max_window = newtp->snd_wnd;
		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
		if (newtp->ecn_flags & TCP_ECN_OK)
			sock_set_flag(newsk, SOCK_NO_LARGESEND);

		TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_options_received tmp_opt;
	struct sock *child;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately) from
			 * the SYN-ACK retransmission count.
			 */
			tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong; we cannot trust it and should rely only
		 *  on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}
	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; the only case in which it does not
	   work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes. So
	   does the sequence test; the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare. The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK of our
	   SYN-ACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before the attempt to create a socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
		return sk;
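	/* Example: had snt_isn been 1000, the only acceptable ack_seq at a
	 * SYN-RECV socket is 1001 (our SYN-ACK consumed exactly one sequence
	 * number); for anything else we return sk, handing the segment to
	 * the listening socket, whose processing emits the reset that
	 * RFC 793 demands.
	 */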
	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */
	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}
	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}
	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}
	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;
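	/* From here the request leaves the SYN queue in three steps:
	 * unlink it from the listener's request hash, drop the listener's
	 * qlen accounting, then attach the request with its new child
	 * socket to the accept queue, where accept() will find it.
	 */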
	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;
listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);
EXPORT_SYMBOL(inet_twsk_deschedule);