[TIMEWAIT]: Introduce inet_timewait_death_row
net/ipv4/tcp_minisocks.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:     $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

/* New-style handling of TIME_WAIT sockets. */

static void inet_twdr_hangman(unsigned long data);
static void inet_twdr_twkill_work(void *data);
static void inet_twdr_twcal_tick(unsigned long data);

int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;

struct inet_timewait_death_row tcp_death_row = {
        .sysctl_max_tw_buckets = NR_FILE * 2,
        .period         = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
        .death_lock     = SPIN_LOCK_UNLOCKED,
        .hashinfo       = &tcp_hashinfo,
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&tcp_death_row),
        .twkill_work    = __WORK_INITIALIZER(tcp_death_row.twkill_work,
                                             inet_twdr_twkill_work,
                                             &tcp_death_row),
        /* Short-time timewait calendar */
        .twcal_hand     = -1,
        .twcal_timer    = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
                                            (unsigned long)&tcp_death_row),
};

EXPORT_SYMBOL_GPL(tcp_death_row);

static void inet_twsk_schedule(struct inet_timewait_sock *tw,
                               struct inet_timewait_death_row *twdr,
                               const int timeo);

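/* Segment acceptability (cf. RFC 793), with s_win = rcv_nxt and
 * e_win = rcv_nxt + rcv_wnd: a segment starting exactly at the left window
 * edge is acceptable even if it carries no data, anything overlapping
 * [s_win, e_win) is acceptable, and a zero-length segment sitting exactly
 * on the right edge is accepted as well.
 */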
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return 1;
        if (after(end_seq, s_win) && before(seq, e_win))
                return 1;
        return (seq == e_win && seq == end_seq);
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection gracefully
 *   when one of the ends sits in LAST-ACK or CLOSING, retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. This means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
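/* Return-value contract, as implemented below: TCP_TW_ACK asks the caller
 * to send an ACK (the bucket is deliberately not put here, the caller
 * releases it); TCP_TW_RST asks for a reset after the bucket has been
 * descheduled and put; TCP_TW_SYN hands the segment to a listening socket
 * with the chosen ISN stashed in TCP_SKB_CB(skb)->when; TCP_TW_SUCCESS
 * means the segment was fully consumed here.
 */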
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        struct tcp_options_received tmp_opt;
        int paws_reject = 0;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
                tcp_parse_options(skb, &tmp_opt, 0);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent       = tcptw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_check(&tmp_opt, th->rst);
                }
        }

        if (tw->tw_substate == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tcptw->tw_rcv_nxt,
                                   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
                        return TCP_TW_ACK;

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
                        goto kill_with_rst;

                /* Dup ACK? */
                if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrive after half-duplex close,
                 * reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
                        inet_twsk_deschedule(tw, &tcp_death_row);
                        inet_twsk_put(tw);
                        return TCP_TW_RST;
                }

                /* FIN arrived, enter true time-wait state. */
                tw->tw_substate   = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent_stamp = xtime.tv_sec;
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }

                /* I am ashamed, but I failed to make it more elegant.
                 * Yes, it is a direct reference to IP, which is impossible
                 * to generalize to IPv6. Taking into account that IPv6
                 * does not understand recycling in any case, it is not
                 * a big problem in practice. --ANK */
                if (tw->tw_family == AF_INET &&
                    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
                    tcp_v4_tw_remember_stamp(tw))
                        inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout);
                else
                        inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         *      Now real TIME-WAIT state.
         *
         *      RFC 1122:
         *      "When a connection is [...] on TIME-WAIT state [...]
         *      [a TCP] MAY accept a new SYN from the remote TCP to
         *      reopen the connection directly, if it:
         *
         *      (1)  assigns its initial sequence number for the new
         *      connection to be larger than the largest sequence
         *      number it used on the previous connection incarnation,
         *      and
         *
         *      (2)  returns to TIME-WAIT state if the SYN turns out
         *      to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* In window segment, it may be only reset or bare ack. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (sysctl_tcp_rfc1337 == 0) {
kill:
                                inet_twsk_deschedule(tw, &tcp_death_row);
                                inet_twsk_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                }
                inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN);

                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                        tcptw->tw_ts_recent_stamp = xtime.tv_sec;
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.

           All such segments are ACKed immediately.

           The only exception is a new SYN. We accept it, if it is
           not an old duplicate and we are not in danger of being killed
           by delayed old duplicates. The RFC check, that it carries a
           newer sequence number, works at rates <40Mbit/sec.
           However, if PAWS works, it is reliable AND, even more,
           we may even relax the silly seq space cutoff.

           RED-PEN: we violate a main RFC requirement: if this SYN turns
           out to be an old duplicate (i.e. we receive an RST in reply to
           our SYN-ACK), we must return the socket to time-wait state. It
           is not good, but not fatal yet.
         */

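        /* Per RFC 1122 condition (1) quoted above, the new connection's ISN
         * must exceed the largest sequence number used by the old
         * incarnation; bumping tw_snd_nxt by 65535 + 2 puts it safely above
         * anything an unscaled window could have carried.
         */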
        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->when = isn;
                return TCP_TW_SYN;
        }

        if (paws_reject)
                NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is an ACKless SYN it may be both an old duplicate
                 * and a new good SYN with a random sequence number < rcv_nxt.
                 * Do not reschedule in the last case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN);

                /* Send ACK. Note, we do not put the bucket,
                 * it will be released by the caller.
                 */
                return TCP_TW_ACK;
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        struct inet_timewait_sock *tw = NULL;
        const struct tcp_sock *tp = tcp_sk(sk);
        int recycle_ok = 0;

        if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
                recycle_ok = tp->af_specific->remember_stamp(sk);

        if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);

        if (tw != NULL) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const struct inet_connection_sock *icsk = inet_csk(sk);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
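                /* (RTO << 2) - (RTO >> 1) = 4*RTO - RTO/2 = 3.5*RTO; see the
                 * rationale in inet_twsk_schedule().
                 */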

                tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt       = tp->rcv_nxt;
                tcptw->tw_snd_nxt       = tp->snd_nxt;
                tcptw->tw_rcv_wnd       = tcp_receive_window(tp);
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
                        struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);

                        ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
                        ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
                        tw->tw_ipv6only = np->ipv6only;
                }
#endif
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (recycle_ok) {
                        tw->tw_timeout = rto;
                } else {
                        tw->tw_timeout = TCP_TIMEWAIT_LEN;
                        if (state == TCP_TIME_WAIT)
                                timeo = TCP_TIMEWAIT_LEN;
                }

                inet_twsk_schedule(tw, &tcp_death_row, timeo);
                inet_twsk_put(tw);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                if (net_ratelimit())
                        printk(KERN_INFO "TCP: time wait bucket table overflow\n");
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}

/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
                                    const int slot)
{
        struct inet_timewait_sock *tw;
        struct hlist_node *node;
        unsigned int killed;
        int ret;

        /* NOTE: compare this to the previous version where the lock
         * was released after detaching the chain. It was racy,
         * because tw buckets are scheduled in a non-serialized context
         * in 2.3 (with netfilter), and with softnet it is common, because
         * soft irqs are not sequenced.
         */
        killed = 0;
        ret = 0;
rescan:
        inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
                __inet_twsk_del_dead_node(tw);
                spin_unlock(&twdr->death_lock);
                __inet_twsk_kill(tw, twdr->hashinfo);
                inet_twsk_put(tw);
                killed++;
                spin_lock(&twdr->death_lock);
                if (killed > INET_TWDR_TWKILL_QUOTA) {
                        ret = 1;
                        break;
                }

                /* While we dropped twdr->death_lock, another cpu may have
                 * killed off the next TW bucket in the list, therefore
                 * do a fresh re-read of the hlist head node with the
                 * lock reacquired.  We still use the hlist traversal
                 * macro in order to get the prefetches.
                 */
                goto rescan;
        }

        twdr->tw_count -= killed;
        NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

        return ret;
}
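
/* INET_TWDR_TWKILL_QUOTA bounds the number of buckets reaped per call; when
 * the quota is hit, the hangman below punts the remainder of the slot to the
 * twkill_work workqueue, so BH context is never hogged for too long.
 */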

static void inet_twdr_hangman(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        unsigned int need_timer;

        twdr = (struct inet_timewait_death_row *)data;
        spin_lock(&twdr->death_lock);

        if (twdr->tw_count == 0)
                goto out;

        need_timer = 0;
        if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
                twdr->thread_slots |= (1 << twdr->slot);
                mb();
                schedule_work(&twdr->twkill_work);
                need_timer = 1;
        } else {
                /* We purged the entire slot, anything left?  */
                if (twdr->tw_count)
                        need_timer = 1;
        }
        twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
        if (need_timer)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
        spin_unlock(&twdr->death_lock);
}

extern void twkill_slots_invalid(void);
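/* Deliberately never defined anywhere: if the compile-time size check in
 * inet_twdr_twkill_work() can ever be true, the call below survives dead-code
 * elimination and the build fails at link time.  A poor man's BUILD_BUG_ON().
 */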

static void inet_twdr_twkill_work(void *data)
{
        struct inet_timewait_death_row *twdr = data;
        int i;

        if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
                twkill_slots_invalid();

        while (twdr->thread_slots) {
                spin_lock_bh(&twdr->death_lock);
                for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
                        if (!(twdr->thread_slots & (1 << i)))
                                continue;

                        while (inet_twdr_do_twkill_work(twdr, i) != 0) {
                                if (need_resched()) {
                                        spin_unlock_bh(&twdr->death_lock);
                                        schedule();
                                        spin_lock_bh(&twdr->death_lock);
                                }
                        }

                        twdr->thread_slots &= ~(1 << i);
                }
                spin_unlock_bh(&twdr->death_lock);
        }
}

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
                          struct inet_timewait_death_row *twdr)
{
        spin_lock(&twdr->death_lock);
        if (inet_twsk_del_dead_node(tw)) {
                inet_twsk_put(tw);
                if (--twdr->tw_count == 0)
                        del_timer(&twdr->tw_timer);
        }
        spin_unlock(&twdr->death_lock);
        __inet_twsk_kill(tw, twdr->hashinfo);
}

static void inet_twsk_schedule(struct inet_timewait_sock *tw,
                               struct inet_timewait_death_row *twdr,
                               const int timeo)
{
        struct hlist_head *list;
        int slot;

        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (the probability of such an
         * event is p^(N+1), where p is the probability of losing a single
         * packet), the time to detect the loss is about RTO*(2^N - 1) with
         * exponential backoff. The normal timewait length is calculated so
         * that we wait at least for one retransmitted FIN (the maximal RTO
         * is 120sec).
         * [ BTW Linux, following BSD, violates this requirement, waiting
         *   only for 60sec; we should wait at least 240 secs.
         *   Well, 240 consumes too many resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if the peer understands PAWS, we
         * kill the tw bucket after 3.5*RTO (it is important that this number
         * is greater than the TS tick!) and detect old duplicates with the
         * help of PAWS.
         */
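        /* Round timeo up to whole 2^INET_TWDR_RECYCLE_TICK-jiffy ticks.
         * For example, with a (HZ-dependent) tick of 2^7 = 128 jiffies,
         * timeo = 300 gives slot = (300 + 127) >> 7 = 3.
         */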
        slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

        spin_lock(&twdr->death_lock);

        /* Unlink it, if it was scheduled */
        if (inet_twsk_del_dead_node(tw))
                twdr->tw_count--;
        else
                atomic_inc(&tw->tw_refcnt);

        if (slot >= INET_TWDR_RECYCLE_SLOTS) {
                /* Schedule to slow timer */
                if (timeo >= TCP_TIMEWAIT_LEN) {
                        slot = INET_TWDR_TWKILL_SLOTS - 1;
                } else {
                        slot = (timeo + twdr->period - 1) / twdr->period;
                        if (slot >= INET_TWDR_TWKILL_SLOTS)
                                slot = INET_TWDR_TWKILL_SLOTS - 1;
                }
                tw->tw_ttd = jiffies + timeo;
                slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
                list = &twdr->cells[slot];
        } else {
                tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

                if (twdr->twcal_hand < 0) {
                        twdr->twcal_hand = 0;
                        twdr->twcal_jiffie = jiffies;
                        twdr->twcal_timer.expires = twdr->twcal_jiffie +
                                              (slot << INET_TWDR_RECYCLE_TICK);
                        add_timer(&twdr->twcal_timer);
                } else {
                        if (time_after(twdr->twcal_timer.expires,
                                       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
                                mod_timer(&twdr->twcal_timer,
                                          jiffies + (slot << INET_TWDR_RECYCLE_TICK));
                        slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
                }
                list = &twdr->twcal_row[slot];
        }

        hlist_add_head(&tw->tw_death_node, list);

        if (twdr->tw_count++ == 0)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
        spin_unlock(&twdr->death_lock);
}
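
/* Two timer wheels are in play above: long timeouts go onto the coarse
 * cells[] wheel driven by tw_timer every twdr->period jiffies, while short,
 * recycle-style timeouts go onto the fine-grained twcal_row[] wheel driven
 * by twcal_timer in 2^INET_TWDR_RECYCLE_TICK-jiffy steps.
 */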

static void inet_twdr_twcal_tick(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        int n, slot;
        unsigned long j;
        unsigned long now = jiffies;
        int killed = 0;
        int adv = 0;

        twdr = (struct inet_timewait_death_row *)data;

        spin_lock(&twdr->death_lock);
        if (twdr->twcal_hand < 0)
                goto out;

        slot = twdr->twcal_hand;
        j = twdr->twcal_jiffie;

        for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
                if (time_before_eq(j, now)) {
                        struct hlist_node *node, *safe;
                        struct inet_timewait_sock *tw;

                        inet_twsk_for_each_inmate_safe(tw, node, safe,
                                                       &twdr->twcal_row[slot]) {
                                __inet_twsk_del_dead_node(tw);
                                __inet_twsk_kill(tw, twdr->hashinfo);
                                inet_twsk_put(tw);
                                killed++;
                        }
                } else {
                        if (!adv) {
                                adv = 1;
                                twdr->twcal_jiffie = j;
                                twdr->twcal_hand = slot;
                        }

                        if (!hlist_empty(&twdr->twcal_row[slot])) {
                                mod_timer(&twdr->twcal_timer, j);
                                goto out;
                        }
                }
                j += 1 << INET_TWDR_RECYCLE_TICK;
                slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
        }
        twdr->twcal_hand = -1;

out:
        if ((twdr->tw_count -= killed) == 0)
                del_timer(&twdr->tw_timer);
        NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
        spin_unlock(&twdr->death_lock);
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

        if (newsk != NULL) {
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
                struct tcp_sock *newtp;

                /* Now setup tcp_sock */
                newtp = tcp_sk(newsk);
                newtp->pred_flags = 0;
                newtp->rcv_nxt = treq->rcv_isn + 1;
                newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;

                tcp_prequeue_init(newtp);

                tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

                newtp->srtt = 0;
                newtp->mdev = TCP_TIMEOUT_INIT;
                newicsk->icsk_rto = TCP_TIMEOUT_INIT;

                newtp->packets_out = 0;
                newtp->left_out = 0;
                newtp->retrans_out = 0;
                newtp->sacked_out = 0;
                newtp->fackets_out = 0;
                newtp->snd_ssthresh = 0x7fffffff;

                /* So many TCP implementations out there (incorrectly) count the
                 * initial SYN frame in their delayed-ACK and congestion control
                 * algorithms that we must have the following bandaid to talk
                 * efficiently to them.  -DaveM
                 */
                newtp->snd_cwnd = 2;
                newtp->snd_cwnd_cnt = 0;

                newtp->frto_counter = 0;
                newtp->frto_highmark = 0;

                newtp->ca_ops = &tcp_reno;

                tcp_set_ca_state(newtp, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                skb_queue_head_init(&newtp->out_of_order_queue);
                newtp->rcv_wup = treq->rcv_isn + 1;
                newtp->write_seq = treq->snt_isn + 1;
                newtp->pushed_seq = newtp->write_seq;
                newtp->copied_seq = treq->rcv_isn + 1;

                newtp->rx_opt.saw_tstamp = 0;

                newtp->rx_opt.dsack = 0;
                newtp->rx_opt.eff_sacks = 0;

                newtp->probes_out = 0;
                newtp->rx_opt.num_sacks = 0;
                newtp->urg_data = 0;

                if (sock_flag(newsk, SOCK_KEEPOPEN))
                        inet_csk_reset_keepalive_timer(newsk,
                                                       keepalive_time_when(newtp));

                newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
                if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
                        if (sysctl_tcp_fack)
                                newtp->rx_opt.sack_ok |= 2;
                }
                newtp->window_clamp = req->window_clamp;
                newtp->rcv_ssthresh = req->rcv_wnd;
                newtp->rcv_wnd = req->rcv_wnd;
                newtp->rx_opt.wscale_ok = ireq->wscale_ok;
                if (newtp->rx_opt.wscale_ok) {
                        newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                        newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
                } else {
                        newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                        newtp->window_clamp = min(newtp->window_clamp, 65535U);
                }
                newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
                newtp->max_window = newtp->snd_wnd;

                if (newtp->rx_opt.tstamp_ok) {
                        newtp->rx_opt.ts_recent = req->ts_recent;
                        newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
                        newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
                } else {
                        newtp->rx_opt.ts_recent_stamp = 0;
                        newtp->tcp_header_len = sizeof(struct tcphdr);
                }
                if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
                if (newtp->ecn_flags & TCP_ECN_OK)
                        sock_set_flag(newsk, SOCK_NO_LARGESEND);

                TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
        }
        return newsk;
}

/*
 *      Process an incoming packet for SYN_RECV sockets represented
 *      as a request_sock.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           struct request_sock **prev)
{
        struct tcphdr *th = skb->h.th;
        struct tcp_sock *tp = tcp_sk(sk);
        u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        int paws_reject = 0;
        struct tcp_options_received tmp_opt;
        struct sock *child;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr) >> 2)) {
                tcp_parse_options(skb, &tmp_opt, 0);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
                        /* We do not store the true stamp, but it is not
                         * required, it can be estimated (approximately)
                         * from other data.
                         */
                        tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
                        paws_reject = tcp_paws_check(&tmp_opt, th->rst);
                }
        }

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
                 * this case on figure 6 and figure 8, but the formal
                 * protocol description says NOTHING.
                 * To be more exact, it says that we should send an ACK,
                 * because this segment (at least, if it has no data)
                 * is out of window.
                 *
                 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
                 *  describe SYN-RECV state. All the description
                 *  is wrong, we cannot trust it and should
                 *  rely only on common sense and implementation
                 *  experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 */
                req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                return NULL;
        }

        /* Further reproduces section "SEGMENT ARRIVES"
           for state SYN-RECEIVED of RFC793.
           It is broken; however, it fails only
           when SYNs are crossed.

           You would think that SYN crossing is impossible here, since
           we should have a SYN_SENT socket (from connect()) on our end,
           but this is not true if the crossed SYNs were sent to both
           ends by a malicious third party.  We must defend against this,
           and to do that we first verify the ACK (as per RFC793, page
           36) and reset if it is invalid.  Is this a true full defense?
           To convince ourselves, let us consider a way in which the ACK
           test can still pass in this 'malicious crossed SYNs' case.
           Malicious sender sends identical SYNs (and thus identical sequence
           numbers) to both A and B:

                A: gets SYN, seq=7
                B: gets SYN, seq=7

           By our good fortune, both A and B select the same initial
           send sequence number of seven :-)

                A: sends SYN|ACK, seq=7, ack_seq=8
                B: sends SYN|ACK, seq=7, ack_seq=8

           So we are now A eating this SYN|ACK, ACK test passes.  So
           does sequence test, SYN is truncated, and thus we consider
           it a bare ACK.

           If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
           bare ACK.  Otherwise, we create an established connection.  Both
           ends (listening sockets) accept the new incoming connection and try
           to talk to each other. 8-)

           Note: This case is both harmless, and rare.  The possibility is about
           the same as us discovering intelligent life on another planet
           tomorrow.

           But generally, we should (RFC lies!) accept an ACK
           from a SYNACK both here and in tcp_rcv_state_process().
           tcp_rcv_state_process() does not, hence, we do not too.

           Note that the case is absolutely generic:
           we cannot optimize anything here without
           violating protocol. All the checks must be made
           before an attempt to create a socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         *                  and the incoming segment acknowledges something not yet
         *                  sent (the segment carries an unacceptable ACK) ...
         *                  a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket
         */
        if ((flg & TCP_FLAG_ACK) &&
            (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* Also, it would not be a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too-early or too-late values
         * should cause a reset in unsynchronized states.
         */

        /* RFC793: "first check sequence number". */

        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                          tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST))
                        req->rsk_ops->send_ack(skb, req);
                if (paws_reject)
                        NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
                req->ts_recent = tmp_opt.rcv_tsval;

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                   at tcp_rsk(req)->rcv_isn + 1. */
                flg &= ~TCP_FLAG_SYN;
        }

        /* RFC793: "second check the RST bit" and
         *         "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
                goto embryonic_reset;

        /* ACK sequence verified above, just make sure ACK is
         * set.  If ACK not set, just silently drop the packet.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;

        /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
        if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

        /* OK, ACK is valid, create big socket and
         * feed this segment to it. It will repeat all
         * the tests. THIS SEGMENT MUST MOVE SOCKET TO
         * ESTABLISHED STATE. If it will be dropped after
         * socket is created, wait for troubles.
         */
        child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
        if (child == NULL)
                goto listen_overflow;

        inet_csk_reqsk_queue_unlink(sk, req, prev);
        inet_csk_reqsk_queue_removed(sk, req);

        inet_csk_reqsk_queue_add(sk, req, child);
        return child;

listen_overflow:
        if (!sysctl_tcp_abort_on_overflow) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

embryonic_reset:
        NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
        if (!(flg & TCP_FLAG_RST))
                req->rsk_ops->send_reset(skb);

        inet_csk_reqsk_queue_drop(sk, req, prev);
        return NULL;
}

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb)
{
        int ret = 0;
        int state = child->sk_state;

        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent, 0);
        } else {
                /* Alas, it is possible again, because we do the lookup
                 * in the main socket hash table and the lock on the
                 * listening socket does not protect us any more.
                 */
                sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}
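
/* A non-zero return propagates tcp_rcv_state_process()'s verdict; the
 * caller (e.g. tcp_v4_do_rcv()) responds by sending a reset.
 */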

EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);
EXPORT_SYMBOL(inet_twsk_deschedule);