/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1
#include <linux/config.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>

#include <linux/seq_file.h>
extern struct inet_hashinfo tcp_hashinfo;

extern atomic_t tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U
/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400
#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
#define TCP_SYNQ_HSIZE		512	/* Size of SYNACK hash table */
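/* 60 * 60 * 24 * 24 below is the number of seconds in 24 days, roughly
 * the horizon after which a 1 ms-resolution timestamp clock wraps and a
 * per-host timestamp can no longer be trusted (cf. RFC 1323, PAWS).
 */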
#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF	1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK	2	/* Socket is corked */
#define TCP_NAGLE_PUSH	4	/* Cork is overridden for already queued data */
extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;

extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}

static inline int after(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq2-seq1) < 0;
}

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
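/* Example (illustrative only): these comparisons are modulo 2^32, so a
 * value just past the wrap point still counts as newer:
 *
 *	before(0xfffffff0, 0x00000010) == 1, since
 *	(__s32)(0xfffffff0 - 0x00000010) == (__s32)0xffffffe0 < 0;
 *
 *	between(2, 0xfffffffe, 5) == 1, since unsigned arithmetic gives
 *	5 - 0xfffffffe == 7, 2 - 0xfffffffe == 4, and 7 >= 4.
 */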
extern struct proto tcp_prot;

DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
#define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field)	SNMP_INC_STATS_USER(tcp_statistics, field)
#define TCP_DEC_STATS(field)		SNMP_DEC_STATS(tcp_statistics, field)
#define TCP_ADD_STATS_BH(field, val)	SNMP_ADD_STATS_BH(tcp_statistics, field, val)
#define TCP_ADD_STATS_USER(field, val)	SNMP_ADD_STATS_USER(tcp_statistics, field, val)
extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int tcp_ioctl(struct sock *sk,
		     int cmd,
		     unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk,
				 struct sk_buff *skb,
				 struct tcphdr *th,
				 unsigned len);

extern int tcp_rcv_established(struct sock *sk,
			       struct sk_buff *skb,
			       struct tcphdr *th,
			       unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

extern int tcp_twsk_unique(struct sock *sk,
			   struct sock *sktw, void *twp);
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}
extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}
enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);

extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct request_sock **prev);
extern int tcp_child_process(struct sock *parent,
			     struct sock *child,
			     struct sk_buff *skb);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level,
			  int optname, char __user *optval,
			  int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level,
			  int optname, char __user *optval,
			  int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg,
		       size_t len, int nonblock,
		       int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      int estab);
/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, int len,
			      struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk,
			       struct sk_buff *skb);

extern struct sock *tcp_create_openreq_child(struct sock *sk,
					     struct request_sock *req,
					     struct sk_buff *skb);

extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk,
					 struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk,
			 struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk,
			  struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff *tcp_make_synack(struct sock *sk,
				       struct dst_entry *dst,
				       struct request_sock *req);

extern int tcp_disconnect(struct sock *sk, int flags);

extern void tcp_unhash(struct sock *sk);

extern int tcp_v4_hash_connecting(struct sock *sk);
/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
				      unsigned int cur_mss, int nonagle);
extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);
/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}
extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk, int large);

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);
/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about it.
 * It's better to underestimate RCV_MSS than to overestimate it.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd/2);
	hint = min(hint, TCP_MIN_RCVMSS);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
{
	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);
/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))
/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/

#define TCPCB_AT_TAIL		(TCPCB_URG)

	__u16		urg_ptr;	/* Valid w/ URG flag set	*/
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
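/* Typical usage (illustrative; "copied" is a hypothetical byte count):
 * the queueing engine stamps each skb before it is transmitted, e.g.
 *
 *	TCP_SKB_CB(skb)->seq     = tp->write_seq;
 *	TCP_SKB_CB(skb)->end_seq = tp->write_seq + copied;
 *	TCP_SKB_CB(skb)->flags   = TCPCB_FLAG_ACK;
 *
 * The cb[] area belongs to whichever layer currently owns the skb, so
 * these fields are only meaningful while TCP holds it.
 */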
#include <net/tcp_ecn.h>

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->tso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->tso_size;
}
static inline void tcp_dec_pcount_approx(__u32 *count,
					 const struct sk_buff *skb)
{
	if (*count) {
		*count -= tcp_skb_pcount(skb);
		if ((int)*count < 0)
			*count = 0;
	}
}

static inline void tcp_packets_out_inc(struct sock *sk,
				       struct tcp_sock *tp,
				       const struct sk_buff *skb)
{
	int orig = tp->packets_out;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!orig)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_packets_out_dec(struct tcp_sock *tp,
				       const struct sk_buff *skb)
{
	tp->packets_out -= tcp_skb_pcount(skb);
}
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};
/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
struct tcp_congestion_ops {
	struct list_head	list;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack,
			   u32 rtt, u32 in_flight, int good_ack);
	/* round trip time sample per acked packet (optional) */
	void (*rtt_sample)(struct sock *sk, u32 usrtt);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};
extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
				u32 rtt, u32 in_flight, int flag);
extern u32 tcp_reno_min_cwnd(struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
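/* Minimal registration sketch (illustrative only; "mycc" and its init
 * function are hypothetical names, and the hooks are borrowed from Reno):
 *
 *	static struct tcp_congestion_ops mycc = {
 *		.ssthresh	= tcp_reno_ssthresh,	(required)
 *		.cong_avoid	= tcp_reno_cong_avoid,	(required)
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.name		= "mycc",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init mycc_register(void)
 *	{
 *		return tcp_register_congestion_control(&mycc);
 *	}
 */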
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}
/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return (tp->packets_out - tp->left_out + tp->retrans_out);
}
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
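/* Note: (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4 of snd_cwnd, i.e. the
 * point half-way between ssthresh (typically cwnd/2 after a loss) and
 * cwnd itself.
 */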
/*
 * Linear increase during slow start
 */
static inline void tcp_slow_start(struct tcp_sock *tp)
{
	if (sysctl_tcp_abc) {
		/* RFC3465: Slow Start
		 * TCP sender SHOULD increase cwnd by the number of
		 * previously unacknowledged bytes ACKed by each incoming
		 * acknowledgment, provided the increase is not more than L
		 */
		if (tp->bytes_acked < tp->mss_cache)
			return;

		/* We MAY increase by 2 if discovered delayed ack */
		if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	}
	tp->bytes_acked = 0;

	if (tp->snd_cwnd < tp->snd_cwnd_clamp)
		tp->snd_cwnd++;
}
static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
	if (tp->rx_opt.sack_ok &&
	    (tp->sacked_out >= tp->packets_out - tp->lost_out))
		tp->sacked_out = tp->packets_out - tp->lost_out;
	tp->left_out = tp->sacked_out + tp->lost_out;
}
/* Set slow start threshold and cwnd not falling to slow start */
static inline void __tcp_enter_cwr(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->undo_marker = 0;
	tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + 1U);
	tp->snd_cwnd_cnt = 0;
	tp->high_seq = tp->snd_nxt;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	TCP_ECN_queue_cwr(tp);
}

static inline void tcp_enter_cwr(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->prior_ssthresh = 0;
	tp->bytes_acked = 0;
	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
		__tcp_enter_cwr(sk);
		tcp_set_ca_state(sk, TCP_CA_CWR);
	}
}
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return 3;
}
/* RFC2861: check whether we are limited by application or congestion window.
 * This is the inverse of the cwnd check in tcp_tso_should_defer().
 */
static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	if (!(sk->sk_route_caps & NETIF_F_TSO))
		return 0;

	left = tp->snd_cwnd - in_flight;
	if (sysctl_tcp_tso_win_divisor)
		return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd;
	else
		return left <= tcp_max_burst(tp);
}
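/* Example (illustrative): with sysctl_tcp_tso_win_divisor == 3 and
 * snd_cwnd == 30, the connection counts as cwnd-limited once fewer
 * than 10 segments (snd_cwnd / divisor) of the window remain unused.
 */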
static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
					   const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}
static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static __inline__ void tcp_push_pending_frames(struct sock *sk,
					       struct tcp_sock *tp)
{
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}
static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}
/*
 * Calculate(/check) TCP checksum
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr,
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}
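/* skb->ip_summed == CHECKSUM_UNNECESSARY means the NIC has already
 * verified the checksum in hardware, so the software check above is
 * skipped entirely.
 */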
/* Prequeue for VJ style copy to user, combined with checksumming. */

static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->sk_rcvbuf) {
			struct sk_buff *skb1;

			BUG_ON(sock_owned_by_user(sk));

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->sk_backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sk_sleep);
			if (!inet_csk_ack_scheduled(sk))
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
							  (3 * TCP_RTO_MIN) / 4,
							  TCP_RTO_MAX);
		}
		return 1;
	}
	return 0;
}
#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif /* STATE_TRACE */
static __inline__ void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(&tcp_hashinfo, sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
static __inline__ void tcp_done(struct sock *sk)
{
	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->eff_sacks = 0;
	rx_opt->num_sacks = 0;
}
static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
						     TCPOLEN_SACK_PERBLOCK)));
		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
}
/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
					 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if (sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if (sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}
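/* Wire layout example (illustrative): a SYN offering MSS 1460 with
 * timestamps, SACK and window scale 7 emits, in order:
 *
 *	02 04 05 b4			MSS = 1460
 *	04 02 08 0a <tsval> <tsecr>	SACK-permitted + timestamp
 *	01 03 03 07			NOP + window scale = 7
 */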
/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);
static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
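/* Worked example (illustrative): with the default
 * sysctl_tcp_adv_win_scale of 2, tcp_win_from_space() returns
 * space - space/4, i.e. 3/4 of the free buffer is offered as window
 * and 1/4 is reserved for application and metadata overhead.  A
 * negative scale flips the split: with -2 only space/4 is offered.
 */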
static __inline__ void tcp_openreq_init(struct request_sock *req,
					struct tcp_options_received *rx_opt,
					struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = skb->h.th->source;
}
extern void tcp_enter_memory_pressure(void);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
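/* Arithmetic note: (rto << 2) - (rto >> 1) is 3.5 * RTO, so the
 * FIN-WAIT-2 lifetime is floored at 3.5 * RTO to outlive a few
 * retransmissions of the peer's FIN.
 */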
static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{
	if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
		return 0;
	if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake.  To relax the constraint one must
	   understand the reason for it: if the peer reboots, its clock may
	   go out-of-sync and half-open connections will never be reset.
	   The problem would not exist if all implementations followed the
	   draft about maintaining clock via reboots.  Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
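/* The (s32)(rcv_tsval - ts_recent) test above is the same
 * wraparound-safe signed-difference comparison used by before() and
 * after(), as PAWS (RFC 1323) requires for 32-bit timestamps.
 */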
#define TCP_CHECK_TIMER(sk) do { } while (0)
static inline int tcp_use_frto(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* F-RTO must be activated in sysctl and there must be some
	 * unsent new data, and the advertised window should allow
	 * sending it.
	 */
	return (sysctl_tcp_frto && sk->sk_send_head &&
		!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
		       tp->snd_una + tp->snd_wnd));
}
static inline void tcp_mib_init(void)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}
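/* Per RFC 2012, the RTO bounds are reported in milliseconds (hence
 * the jiffies -> ms conversion above) and a tcpMaxConn of -1 means
 * the maximum number of connections is dynamic.
 */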
static inline void clear_all_retrans_hints(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
	tp->retransmit_skb_hint = NULL;
	tp->forward_skb_hint = NULL;
	tp->fastpath_skb_hint = NULL;
	tp->fastpath_cnt_hint = 0;
}
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	struct module		*owner;
	char			*name;
	sa_family_t		family;
	int			(*seq_show) (struct seq_file *m, void *v);
	struct file_operations	*seq_fops;
};

struct tcp_iter_state {
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
	struct seq_operations	seq_ops;
};

extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
extern struct request_sock_ops tcp_request_sock_ops;

extern int tcp_v4_destroy_sock(struct sock *sk);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

extern void tcp_v4_init(struct net_proto_family *ops);
extern void tcp_init(void);

#endif	/* _TCP_H */