/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1
#include <linux/config.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/tcp_states.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern atomic_t tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U
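
/* Illustrative note (not from the original header): a receiver that treats
 * the 16-bit window field as signed would read an offered window of 40000
 * as 40000 - 65536 = -25536, hence the 32767 cap when scaling is off.
 */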
/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400
#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC 1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#if HZ >= 100
#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MAX	10
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
#define TCP_SYNQ_HSIZE		512	/* Size of SYNACK hash table */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */
#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */

/*
 *	TCP option lengths
 */
#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF	1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK	2	/* Socket is corked */
#define TCP_NAGLE_PUSH	4	/* Cork is overridden for already queued data */
extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;

extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */
static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}

static inline int after(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq2-seq1) < 0;
}

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
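
/* Illustrative example (not part of the original header): these helpers
 * stay correct across sequence-number wraparound. With seq1 = 0xFFFFFFF0
 * and seq2 = 0x10, seq1 - seq2 = 0xFFFFFFE0, which is -32 as an __s32,
 * so before(0xFFFFFFF0, 0x10) is true even though seq1 > seq2 as plain
 * unsigned values.
 */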
extern struct proto tcp_prot;

DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
#define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field)	SNMP_INC_STATS_USER(tcp_statistics, field)
#define TCP_DEC_STATS(field)		SNMP_DEC_STATS(tcp_statistics, field)
#define TCP_ADD_STATS_BH(field, val)	SNMP_ADD_STATS_BH(tcp_statistics, field, val)
#define TCP_ADD_STATS_USER(field, val)	SNMP_ADD_STATS_USER(tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);

extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}
extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);

extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);

extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t len, int nonblock,
		       int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      int estab);
/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, int len,
			      struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

extern struct sock *tcp_create_openreq_child(struct sock *sk,
					     struct request_sock *req,
					     struct sk_buff *skb);

extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk,
					 struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff *tcp_make_synack(struct sock *sk,
				       struct dst_entry *dst,
				       struct request_sock *req);

extern int tcp_disconnect(struct sock *sk, int flags);

extern void tcp_unhash(struct sock *sk);

extern int tcp_v4_hash_connecting(struct sock *sk);

/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
				      unsigned int cur_mss, int nonagle);
extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk, int large);

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
{
	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))
/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code. We also store the host-order sequence numbers in
 * here too. This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/

#define TCPCB_AT_TAIL		(TCPCB_URG)

	__u16		urg_ptr;	/* Valid w/URG flags is set.	*/
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
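
/* Usage sketch (illustrative, not from the original header): the macro
 * overlays struct tcp_skb_cb on skb->cb[], so transmit-side code can do
 * e.g.:
 *
 *	TCP_SKB_CB(skb)->seq     = tp->write_seq;
 *	TCP_SKB_CB(skb)->end_seq = tp->write_seq + skb->len;
 *	TCP_SKB_CB(skb)->flags   = TCPCB_FLAG_ACK;
 *
 * sizeof(struct tcp_skb_cb) must never exceed sizeof(skb->cb).
 */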
#include <net/tcp_ecn.h>

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->tso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->tso_size;
}
static inline void tcp_dec_pcount_approx(__u32 *count,
					 const struct sk_buff *skb)
{
	if (*count) {
		*count -= tcp_skb_pcount(skb);
		if ((int)*count < 0)
			*count = 0;
	}
}

static inline void tcp_packets_out_inc(struct sock *sk,
				       struct tcp_sock *tp,
				       const struct sk_buff *skb)
{
	int orig = tp->packets_out;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!orig)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_packets_out_dec(struct tcp_sock *tp,
				       const struct sk_buff *skb)
{
	tp->packets_out -= tcp_skb_pcount(skb);
}
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};
/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
struct tcp_congestion_ops {
	struct list_head	list;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack,
			   u32 rtt, u32 in_flight, int good_ack);
	/* round trip time sample per acked packet (optional) */
	void (*rtt_sample)(struct sock *sk, u32 usrtt);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};
extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
				u32 rtt, u32 in_flight, int flag);
extern u32 tcp_reno_min_cwnd(struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
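
/* Illustrative sketch (not part of this header): a minimal congestion
 * control module can be built from the hooks above by reusing the Reno
 * helpers; only .ssthresh and .cong_avoid are required. The module and
 * symbol names below are hypothetical.
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *	module_init(tcp_example_register);
 */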
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}
/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return (tp->packets_out - tp->left_out + tp->retrans_out);
}
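
/* Worked example (illustrative): with packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1, left_out = sacked_out + lost_out = 3,
 * so tcp_packets_in_flight() = 10 - 3 + 1 = 8 segments still in the pipe.
 */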
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
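
/* Note (added for clarity): (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4 of
 * snd_cwnd, so outside CWR/Recovery the reported ssthresh is at least
 * three quarters of the current congestion window.
 */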
static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
	if (tp->rx_opt.sack_ok &&
	    (tp->sacked_out >= tp->packets_out - tp->lost_out))
		tp->sacked_out = tp->packets_out - tp->lost_out;
	tp->left_out = tp->sacked_out + tp->lost_out;
}

extern void tcp_enter_cwr(struct sock *sk);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return 3;
}
/* RFC 2861: check whether we are limited by application or congestion
 * window. This is the inverse of the cwnd check in tcp_tso_should_defer().
 */
static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	if (!(sk->sk_route_caps & NETIF_F_TSO))
		return 0;

	left = tp->snd_cwnd - in_flight;
	if (sysctl_tcp_tso_win_divisor)
		return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd;
	return left <= tcp_max_burst(tp);
}
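
/* Worked example (illustrative): with snd_cwnd = 10, in_flight = 9 and
 * sysctl_tcp_tso_win_divisor = 3, left = 1 and 1 * 3 < 10, so the sender
 * is cwnd-limited; with in_flight = 4, left = 6 and 6 * 3 >= 10, so it is
 * considered application-limited instead.
 */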
static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk,
					   struct tcp_sock *tp)
{
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}
/*
 * Calculate(/check) TCP checksum
 */
static inline u16 tcp_v4_check(struct tcphdr *th, int len,
			       unsigned long saddr, unsigned long daddr,
			       unsigned long base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
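
/* Usage note (illustrative, names assumed): "base" is the checksum already
 * accumulated over the TCP header and payload (e.g. from csum_partial());
 * this helper folds in the IPv4 pseudo-header. A transmit path sketch,
 * where inet->saddr/inet->daddr stand for the socket's addresses:
 *
 *	th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
 *				 csum_partial((char *)th, len, 0));
 */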
static inline int __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}
/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->sk_rcvbuf) {
			struct sk_buff *skb1;

			BUG_ON(sock_owned_by_user(sk));

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->sk_backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sk_sleep);
			if (!inet_csk_ack_scheduled(sk))
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
							  (3 * TCP_RTO_MIN) / 4,
							  TCP_RTO_MAX);
		}
		return 1;
	}
	return 0;
}
#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
static inline void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(&tcp_hashinfo, sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",
		   sk, statename[oldstate], statename[state]);
#endif
}
static inline void tcp_done(struct sock *sk)
{
	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->eff_sacks = 0;
	rx_opt->num_sacks = 0;
}
/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);
static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
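
/* Worked example (illustrative): with the default sysctl_tcp_adv_win_scale
 * of 2, tcp_win_from_space(space) = space - space/4, i.e. 3/4 of the buffer
 * is offered as window and 1/4 is kept back as overhead/application slack.
 */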
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = skb->h.th->source;
}
extern void tcp_enter_memory_pressure(void);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}
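
/* Note (added for clarity): the "x ? : y" form is the GNU elvis extension,
 * yielding x when it is non-zero and y otherwise; a per-socket keepalive
 * setting thus overrides the sysctl default.
 */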
static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
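
/* Note (added for clarity): (rto << 2) - (rto >> 1) is 3.5 * RTO, so the
 * FIN-WAIT-2 timeout is never allowed to drop below 3.5 retransmission
 * timeouts.
 */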
static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{
	if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
		return 0;
	if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   That is certainly a mistake. To relax this constraint one has to
	   understand the reason for it: if a peer reboots, its clock may go
	   out-of-sync and half-open connections will never be reset.
	   Actually, the problem would not exist if all implementations
	   followed the draft about maintaining clocks across reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline int tcp_use_frto(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* F-RTO must be activated in sysctl and there must be some
	 * unsent new data, and the advertised window should allow
	 * sending it.
	 */
	return (sysctl_tcp_frto && sk->sk_send_head &&
		!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
		       tp->snd_una + tp->snd_wnd));
}
static inline void tcp_mib_init(void)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}
static inline void clear_all_retrans_hints(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
	tp->retransmit_skb_hint = NULL;
	tp->forward_skb_hint = NULL;
	tp->fastpath_skb_hint = NULL;
}
/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	struct module		*owner;
	char			*name;
	sa_family_t		family;
	int			(*seq_show) (struct seq_file *m, void *v);
	struct file_operations	*seq_fops;
};

struct tcp_iter_state {
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
	struct seq_operations	seq_ops;
};
extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;

extern int tcp_v4_destroy_sock(struct sock *sk);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

extern void tcp_v4_init(struct net_proto_family *ops);
extern void tcp_init(void);

#endif	/* _TCP_H */